chenyukun 1 year ago
Parent
Commit
79546134f9
13 changed files: 510 additions and 294 deletions
  1. +96 -25 concurrency/FileUploadThread.py
  2. +0 -1 concurrency/IntelligentRecognitionProcess.py
  3. +145 -101 concurrency/PushVideoStreamProcess.py
  4. +142 -126 concurrency/PushVideoStreamProcess2.py
  5. +2 -2 config/service/dsp_dev_service.yml
  6. +3 -4 config/service/dsp_prod_service.yml
  7. +3 -4 config/service/dsp_test_service.yml
  8. +21 -11 enums/ModelTypeEnum.py
  9. +24 -12 enums/ModelTypeEnum2.py
  10. +40 -0 start.sh
  11. +19 -0 stop.sh
  12. +9 -6 util/ModelUtils.py
  13. +6 -2 util/ModelUtils2.py

+96 -25 concurrency/FileUploadThread.py

@@ -30,7 +30,9 @@ class ImageFileUpload(FileUpload):

@staticmethod
def handle_image(frame_msg, frame_step):
det_xywh, frame, current_frame, all_frames, font_config = frame_msg
# (high_score_image["code"], all_frames, draw_config["font_config"])
# high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
det_result, all_frames, font_config = frame_msg
'''
det_xywh:{
'code':{
@@ -42,25 +44,62 @@ class ImageFileUpload(FileUpload):
'''
model_info = []
# Parse the detections by model code
for code, det_list in det_xywh.items():
for code, det_list in det_result.items():
if len(det_list) > 0:
for cls, target_list in det_list.items():
if len(target_list) > 0:
frame, iframe, cls_list = target_list
if len(cls_list) > 0:
aFrame = frame.copy()
for target in target_list:
for target in cls_list:
# detect_targets_code, box, score, label_array, color
draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config)
model_info.append({"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame})
model_info.append({
"or_frame": frame,
"modelCode": str(code),
"detectTargetCode": str(cls),
"aFrame": aFrame,
"current_frame": iframe,
"last_frame": iframe + frame_step
})
if len(model_info) > 0:
image_result = {
"or_frame": frame,
"model_info": model_info,
"current_frame": current_frame,
"last_frame": current_frame + frame_step
}
return image_result
return model_info
return None

# @staticmethod
# def handle_image(frame_msg, frame_step):
# # (high_score_image["code"], all_frames, draw_config["font_config"])
# # high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
# det_xywh, frame, current_frame, all_frames, font_config = frame_msg
# '''
# det_xywh:{
# 'code':{
# 1: [[detect_targets_code, box, score, label_array, color]]
# }
# }
# model code: modeCode
# detect target: detectTargetCode
# '''
# model_info = []
# # Parse the detections by model code
# for code, det_list in det_xywh.items():
# if len(det_list) > 0:
# for cls, target_list in det_list.items():
# if len(target_list) > 0:
# aFrame = frame.copy()
# for target in target_list:
# # detect_targets_code, box, score, label_array, color
# draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config)
# model_info.append({"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame})
# if len(model_info) > 0:
# image_result = {
# "or_frame": frame,
# "model_info": model_info,
# "current_frame": current_frame,
# "last_frame": current_frame + frame_step
# }
# return image_result
# return None

def run(self):
msg, context = self._msg, self._context
service = context["service"]
@@ -90,20 +129,18 @@ class ImageFileUpload(FileUpload):
if image_msg[0] == 1:
image_result = self.handle_image(image_msg[1], frame_step)
if image_result is not None:
task = []
or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
or_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
analyse_type,
"OR", "0", "0", request_id)
or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
task.append(or_future)
model_info_list = image_result["model_info"]
msg_list = []
for model_info in model_info_list:
task, msg_list = [], []
for model_info in image_result:
or_image = cv2.imencode(".jpg", model_info["or_frame"])[1]
or_image_name = build_image_name(model_info["current_frame"],
model_info["last_frame"],
analyse_type,
"OR", "0", "0", request_id)
or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
task.append(or_future)
ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
ai_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
ai_image_name = build_image_name(model_info["current_frame"],
model_info["last_frame"],
analyse_type,
"AI",
model_info["modelCode"],
@@ -124,6 +161,40 @@ class ImageFileUpload(FileUpload):
for msg in msg_list:
put_queue(fb_queue, msg, timeout=2, is_ex=False)
del task, msg_list
# task = []
# or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
# or_image_name = build_image_name(image_result["current_frame"],
# image_result["last_frame"],
# analyse_type,
# "OR", "0", "0", request_id)
# or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
# task.append(or_future)
# model_info_list = image_result["model_info"]
# msg_list = []
# for model_info in model_info_list:
# ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
# ai_image_name = build_image_name(image_result["current_frame"],
# image_result["last_frame"],
# analyse_type,
# "AI",
# model_info["modelCode"],
# model_info["detectTargetCode"],
# request_id)
# ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
# ai_image.tobytes())
# task.append(ai_future)
# msg_list.append(message_feedback(request_id,
# AnalysisStatus.RUNNING.value,
# analyse_type, "", "", "",
# or_image_name,
# ai_image_name,
# model_info['modelCode'],
# model_info['detectTargetCode']))
# for tk in task:
# tk.result()
# for msg in msg_list:
# put_queue(fb_queue, msg, timeout=2, is_ex=False)
# del task, msg_list
else:
sleep(1)
del image_msg

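The payload placed on image_queue by the push-stream processes, and unpacked by handle_image above, now carries the per-model cache instead of a single frame. A minimal sketch of the assumed shape (names and example values below are illustrative, not taken from the repository):

import numpy as np

# Hypothetical stand-ins; real values come from the push-stream process.
frame = np.zeros((720, 1280, 3), dtype=np.uint8)       # original frame
label_array = np.zeros((20, 60, 3), dtype=np.uint8)    # pre-rendered label image
all_frames = 1500                                       # frames analysed so far
font_config = {"fontSize": 1.0, "line_thickness": 2}    # assumed keys

# det_result maps model code -> {cls: (frame, frame_index, cls_list)};
# each cls_list entry is [cls, box, score, label_array, color].
det_result = {
    "001": {
        0: (frame, 1200, [[0, (10, 20, 110, 220), 0.87, label_array, (0, 255, 0)]]),
    },
}
frame_msg = (det_result, all_frames, font_config)
# The producer enqueues (1, [det_result, all_frames, font_config]) and
# handle_image(frame_msg, frame_step) unpacks it as shown at the top of this diff.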
+0 -1 concurrency/IntelligentRecognitionProcess.py

@@ -3,7 +3,6 @@ import base64
import os
from concurrent.futures import ThreadPoolExecutor
from os.path import join, exists, getsize
from profile import Profile
from time import time, sleep
from traceback import format_exc


+145 -101 concurrency/PushVideoStreamProcess.py

@@ -117,11 +117,28 @@ class OnPushStreamProcess(PushStreamProcess):
for i, frame in enumerate(frame_list):
# Copy the frame for drawing
copy_frame = frame.copy()
det_xywh, code_list, thread_p = {}, {}, []
det_xywh, thread_p = {}, []
for det in push_objs[i]:
rr = t.submit(self.handle_image, det_xywh, det, frame_score, copy_frame,
draw_config, code_list)
thread_p.append(rr)
code, det_result = det
# Handle each model separately
# Model code, all detections in the 100-frame batch, detect targets, colors, label images
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -137,59 +154,65 @@ class OnPushStreamProcess(PushStreamProcess):
ai_video_file, ai_write_status, request_id)
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status, request_id)
# Upload detections at most once every frame_step frames
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# Handling within the frame_step window
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# All cached model codes
cache_codes = set(high_score_image["code"].keys())
# Iterate over the current models
for det_code, det_clss in det_xywh.items():
# If the model code is not in the cache
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# If the model code is in the cache, check its detect targets
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# If there are detections, run the logic below
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# Check the gap between the current frame and the last cached frame; skip if below the configured step
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# If the model codes are the same
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# If there are more detect targets than cached
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# If the detect-target sets are equal, compare how many detections each target has
elif det_clss == cache_clss:
for cls in cache_clss:
# If this target has more detections than the cached one
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# If the current result covers more models than the last issue, treat it as a new issue and upload the image
elif det_codes > cache_codes:
flag = True
# If the detected models differ
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# Check similarity with the previous issue image; skip if too similar
if picture_similarity and len(high_score_image) > 0:
# Compare the current frame with the last frame that had detections
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])), timeout=2)
# If the cache is empty, add the current detections to it
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_stream_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
or_video_file = write_or_video_result.result(timeout=5)
@@ -288,14 +311,28 @@ class OffPushStreamProcess(PushStreamProcess):
# Copy the frame for drawing
copy_frame = frame.copy()
# Dict recording all detections
det_xywh = {}
code_list = {}
# Each frame may carry results from multiple models; handle them per model
thread_p = []
det_xywh, thread_p = {}, []
for det in push_objs[i]:
rr = t.submit(self.handle_image, det_xywh, det, frame_score, copy_frame,
draw_config, code_list)
thread_p.append(rr)
code, det_result = det
# Handle each model separately
# Model code, all detections in the 100-frame batch, detect targets, colors, label images
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -309,59 +346,66 @@ class OffPushStreamProcess(PushStreamProcess):
ai_write_status, request_id)
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status, request_id)
# Upload detections at most once every frame_step frames
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# Handling within the frame_step window
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# All cached model codes
cache_codes = set(high_score_image["code"].keys())
# Iterate over the current models
for det_code, det_clss in det_xywh.items():
# If the model code is not in the cache
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# If the model code is in the cache, check its detect targets
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
# Check whether the detect target is already cached
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# If there are detections, run the logic below
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# Check the gap between the current frame and the last cached frame; skip if below the configured step
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# If the model codes are the same
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# If there are more detect targets than cached
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# If the detect-target sets are equal, compare how many detections each target has
elif det_clss == cache_clss:
for cls in cache_clss:
# If this target has more detections than the cached one
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# If the current result covers more models than the last issue, treat it as a new issue and upload the image
elif det_codes > cache_codes:
flag = True
# If the detected models differ
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# Check similarity with the previous issue image; skip if too similar
if picture_similarity and len(high_score_image) > 0:
# Compare the current frame with the last frame that had detections
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])), timeout=2)
# If the cache is empty, add the current detections to it
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_stream_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
# Receive the stop command

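The upload gate above filters near-duplicate frames with a dHash fingerprint and the formula similarity_1 = 1 - dist / 64. A minimal self-contained sketch, assuming ImageUtils.dHash is a standard 64-bit difference hash (as that formula implies):

import cv2
import numpy as np

def d_hash(img: np.ndarray) -> int:
    # 9x8 grayscale thumbnail -> 64 horizontal-gradient bits (classic dHash).
    small = cv2.resize(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), (9, 8))
    bits = (small[:, 1:] > small[:, :-1]).flatten()
    return int("".join("1" if b else "0" for b in bits), 2)

def frame_similarity(prev: np.ndarray, curr: np.ndarray) -> float:
    dist = bin(d_hash(prev) ^ d_hash(curr)).count("1")  # Hamming distance
    return 1 - dist / 64.0                              # same formula as similarity_1 above

if __name__ == "__main__":
    prev = np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8)
    curr = prev.copy()
    # With similarity: 0.65 in the configs below, frames whose hashes differ
    # in 22 bits or fewer are treated as duplicates and not re-uploaded.
    print(frame_similarity(prev, curr) >= 0.65)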
+142 -126 concurrency/PushVideoStreamProcess2.py

@@ -43,31 +43,6 @@ class PushStreamProcess2(Process):
logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
self._context["logo"] = logo

@staticmethod
def handle_image(det_xywh, det, frame_score, copy_frame, draw_config, code_list):
code, det_result = det
# Handle each model separately
# Model code, all detections in the 100-frame batch, detect targets, colors, label images
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
# box, img, label_array, score=0.5, color=None, config=None
draw_painting_joint(box, copy_frame, label_array, score, color, font_config)
if det_xywh.get(code) is None:
det_xywh[code], code_list[code] = {}, {}
cd = det_xywh[code].get(cls)
if cd is None:
code_list[code][cls] = 1
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
code_list[code][cls] += 1
det_xywh[code][cls].append([cls, box, score, label_array, color])


class OnPushStreamProcess2(PushStreamProcess2):
__slots__ = ()
@@ -125,14 +100,27 @@ class OnPushStreamProcess2(PushStreamProcess2):
# Copy the frame for drawing
copy_frame = frame.copy()
# Dict recording all detections
det_xywh, code_list, thread_p = {}, {}, []
det_xywh, thread_p = {}, []
# [model-1 detections, model-2 detections, model-3 detections]
for s_det_list in push_objs:
code, det_list = s_det_list
rr = t.submit(self.handle_image, det_xywh, (code, det_list[i]), frame_score,
copy_frame,
draw_config, code_list)
thread_p.append(rr)
code, det_result = s_det_list[0], s_det_list[1][i]
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -149,59 +137,66 @@ class OnPushStreamProcess2(PushStreamProcess2):
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
# Upload detections at most once every frame_step frames
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# Handling within the frame_step window
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# All cached model codes
cache_codes = set(high_score_image["code"].keys())
# Iterate over the current models
for det_code, det_clss in det_xywh.items():
# If the model code is not in the cache
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# If the model code is in the cache, check its detect targets
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
# Check whether the detect target is already cached
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# If there are detections, run the logic below
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# Check the gap between the current frame and the last cached frame; skip if below the configured step
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# If the model codes are the same
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# If there are more detect targets than cached
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# If the detect-target sets are equal, compare how many detections each target has
elif det_clss == cache_clss:
for cls in cache_clss:
# If this target has more detections than the cached one
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# If the current result covers more models than the last issue, treat it as a new issue and upload the image
elif det_codes > cache_codes:
flag = True
# If the detected models differ
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# Check similarity with the previous issue image; skip if too similar
if picture_similarity and len(high_score_image) > 0:
# Compare the current frame with the last frame that had detections
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])))
# If the cache is empty, add the current detections to it
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_p_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
or_video_file = write_or_video_result.result(timeout=5)
@@ -300,12 +295,26 @@ class OffPushStreamProcess2(PushStreamProcess2):
# Copy the frame for drawing
copy_frame = frame.copy()
# Dict recording all detections
det_xywh, code_list, thread_p = {}, {}, []
det_xywh, thread_p = {}, []
for s_det_list in push_objs:
code, det_list = s_det_list
rr = t.submit(self.handle_image, det_xywh, (code, det_list[i]), frame_score,
copy_frame, draw_config, code_list)
thread_p.append(rr)
code, det_result = s_det_list[0], s_det_list[1][i]
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -320,59 +329,66 @@ class OffPushStreamProcess2(PushStreamProcess2):
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
# Upload detections at most once every frame_step frames
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# Handling within the frame_step window
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# All cached model codes
cache_codes = set(high_score_image["code"].keys())
# Iterate over the current models
for det_code, det_clss in det_xywh.items():
# If the model code is not in the cache
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# If the model code is in the cache, check its detect targets
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
# Check whether the detect target is already cached
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# If there are detections, run the logic below
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# Check the gap between the current frame and the last cached frame; skip if below the configured step
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# If the model codes are the same
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# If there are more detect targets than cached
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# If the detect-target sets are equal, compare how many detections each target has
elif det_clss == cache_clss:
for cls in cache_clss:
# If this target has more detections than the cached one
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# If the current result covers more models than the last issue, treat it as a new issue and upload the image
elif det_codes > cache_codes:
flag = True
# If the detected models differ
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# Check similarity with the previous issue image; skip if too similar
if picture_similarity and len(high_score_image) > 0:
# Compare the current frame with the last frame that had detections
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])))
# If the cache is empty, add the current detections to it
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_p_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
# Receive the stop command

+2 -2 config/service/dsp_dev_service.yml

@@ -9,8 +9,8 @@ service:
frame_score: 0.4
# image similarity filter
picture_similarity: true
similarity: 1
frame_step: 160
similarity: 0.65
frame_step: 300
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000

+3 -4 config/service/dsp_prod_service.yml

@@ -4,14 +4,13 @@ video:
# whether to add a watermark
video_add_water: false
service:

filter:
# minimum score for an image to be returned
frame_score: 0.4
# image similarity filter
picture_similarity: true
similarity: 1
frame_step: 160
similarity: 0.65
frame_step: 300
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
@@ -20,7 +19,7 @@ service:
# which recognition mode to use
# 1: normal
# 2: model tracking
model_type: 2
model_type: 1
limit: 3
task:
# limit of 5 tasks

+3 -4 config/service/dsp_test_service.yml

@@ -4,14 +4,13 @@ video:
# whether to add a watermark
video_add_water: false
service:

filter:
# minimum score for an image to be returned
frame_score: 0.4
# image similarity filter
picture_similarity: true
similarity: 1
frame_step: 160
similarity: 0.65
frame_step: 300
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
@@ -20,7 +19,7 @@ service:
# which recognition mode to use
# 1: normal
# 2: model tracking
model_type: 2
model_type: 1
limit: 3
task:
# limit of 5 tasks

+21 -11 enums/ModelTypeEnum.py

@@ -4,6 +4,8 @@ from enum import Enum, unique
from common.Constant import COLOR

sys.path.extend(['..', '../AIlib2'])
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo
from segutils.segmodel import SegModel
from utilsK.queRiver import riverDetSegMixProcess
from segutils.trafficUtils import tracfficAccidentMixFunction
@@ -319,26 +321,33 @@ class ModelType(Enum):
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName
})

CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement', lambda device, gpuName: {
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["车辆", "垃圾", "商贩"],
'seg_nclass': 2, # number of segmentation classes, default 2
'segRegionCnt': 0,
'slopeIndex': [],
'labelnames': ["车辆", "垃圾", "商贩", "违停"],
'seg_nclass': 4, # number of segmentation classes, default 2
'segRegionCnt': 2,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/cityMangement/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'trtFlag_seg': True,
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName,
'segPar': {
'depth_factor': 32,
'NUM_FEATURE_MAP_CHANNEL': 6,
'dmpr_thresh': 0.3,
'dmprimg_size': 640,
'mixFunction': {
'function': dmpr_yolo,
'pars': {'carCls': 0, 'illCls': 3}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"ovlap_thres_crossCategory": 0.6,
"classes": 2,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName
})

DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {
@@ -471,6 +480,7 @@ BAIDU_MODEL_TARGET_CONFIG = {

EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}


# Model analysis method
@unique
class ModelMethodTypeEnum(Enum):

+24 -12 enums/ModelTypeEnum2.py

@@ -10,6 +10,8 @@ from segutils.trafficUtils import tracfficAccidentMixFunction
from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.illParkingUtils import illParking_postprocess
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo

'''
Parameter description
@@ -494,12 +496,12 @@ class ModelType2(Enum):
}
})

CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement', lambda device, gpuName: {
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "垃圾", "商贩"],
'labelnames': ["车辆", "垃圾", "商贩", "违停"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/cityMangement/yolov5_%s_fp16.engine" % gpuName,
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # max consecutive frames a target may go undetected before its track is dropped and it is treated as a new target
'sort_min_hits': 3, # consecutive detections required before something is confirmed as a target
@@ -508,26 +510,35 @@
'windowsize': 25, # trajectory smoothing window; must be odd, default 29; a target's per-frame positions form the trajectory being smoothed
'patchCnt': 100, # number of frames fed per batch; should not be fewer than 100
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'depth_factor': 32,
'NUM_FEATURE_MAP_CHANNEL': 6,
'dmpr_thresh': 0.3,
'dmprimg_size': 640,
'mixFunction': {
'function': dmpr_yolo,
'pars': {'carCls': 0, 'illCls': 3}
}
},
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"ovlap_thres_crossCategory": 0.6,
"classes": 2,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 40,
'txtFontSize': 20,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
'waterLineWidth': 2
}
})

@@ -600,7 +611,7 @@ class ModelType2(Enum):
'RGB_convert_first': True,
'mixFunction': {
'function': mixNoParking_road_postprocess,
'pars': {'modelSize': (640, 360), 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5,
'pars': {'modelSize': (640, 360), 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5,
'RoadArea': 16000}
}
},
@@ -666,6 +677,7 @@ BAIDU_MODEL_TARGET_CONFIG2 = {

EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}


# Model analysis method
@unique
class ModelMethodTypeEnum2(Enum):

+40 -0 start.sh

@@ -0,0 +1,40 @@
#!/bin/bash
current_dir=$(cd "$(dirname "$0")"; pwd)
active=$(basename "$(dirname "$current_dir")")
conda_env="alg"
echo "当前程序所在目录: $current_dir, 当前程序启动环境: $active"
if [[ "a${active}" != "adev" && "a${active}" != "atest" && "a${active}" != "aprod" ]]; then
echo "###############################################################";
echo "启动失败, 当前环境只支持dev、test、prod";
echo "环境是根据程序所在目录自动匹配的, 请检测程序路径配置是否正确!";
echo "###############################################################";
exit 1
fi
cd $current_dir
pid=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid" ]; then
echo "alg进程已存在, 进程id: $pid"
kill -9 ${pid};
echo "杀掉当前alg进程, 进程号:$pid"
sleep 1
pid_1=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid_1" ]; then
echo "###############################################################";
echo "杀掉alg进程失败!"
echo "###############################################################";
exit 1
else
echo "杀掉alg进程成功!!"
fi
fi
nohup /home/th/anaconda3/envs/${conda_env}/bin/python3.8 dsp_master.py ${active} > /dev/null 2>&1 &
sleep 1
pid_end=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid_end" ]; then
echo "alg启动成功, $pid_end"
else
echo "###############################################################";
echo "alg启动失败!!!!!!!!!!!!!!!!!!!!!!"
echo "###############################################################";
exit 1
fi

+19 -0 stop.sh

@@ -0,0 +1,19 @@
#!/bin/bash
conda_env="alg"
pid=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid" ]; then
kill -9 ${pid};
echo "杀掉当前alg进程, 进程号:$pid"
fi
sleep 1
pid_end=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid_end" ]; then
echo "###############################################################";
echo "alg停止失败!!!!!, $pid_end"
echo "###############################################################";
exit 1
else
echo "###############################################################";
echo "alg停止成功!!!!!!!!!!!!!!!!!!!!!!"
echo "###############################################################";
fi

+9 -6 util/ModelUtils.py

@@ -18,7 +18,7 @@ from util.TorchUtils import select_device

sys.path.extend(['..', '../AIlib2'])
from AI import AI_process, AI_process_forest, get_postProcess_para, ocr_process
from stdc import stdcModel
from segutils.segmodel import SegModel
from models.experimental import attempt_load
from obbUtils.shipUtils import OBB_infer
@@ -26,7 +26,7 @@ from obbUtils.load_obb_model import load_model_decoder_OBB
import torch
import tensorrt as trt
from utilsK.jkmUtils import pre_process, post_process, get_return_data
from DMPR import DMPRModel
FONT_PATH = "../AIlib2/conf/platech.ttf"


@@ -48,10 +48,13 @@ class OneModel:
Detweights = par['Detweights']
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
model = runtime.deserialize_cuda_engine(f.read())
par['segPar']['seg_nclass'] = par['seg_nclass']
Segweights = par['Segweights']
if Segweights:
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
segmodel = runtime.deserialize_cuda_engine(f.read())
if modeType.value[3] == 'cityMangement2':
segmodel = DMPRModel(weights=Segweights, par=par['segPar'])
else:
segmodel = stdcModel(weights=Segweights, par=par['segPar'])
else:
segmodel = None
objectPar = {
@@ -444,10 +447,10 @@ MODEL_CONFIG = {
lambda x: model_process(x)),
# City management model
ModelType.CITY_MANGEMENT_MODEL.value[1]: (
lambda x, y, r, t, z, h: TwoModel(x, y, r, ModelType.CITY_MANGEMENT_MODEL, t, z, h),
lambda x, y, r, t, z, h: OneModel(x, y, r, ModelType.CITY_MANGEMENT_MODEL, t, z, h),
ModelType.CITY_MANGEMENT_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: forest_process(x)
lambda x: model_process(x)
),
# Person-drowning model
ModelType.DROWING_MODEL.value[1]: (

+6 -2 util/ModelUtils2.py

@@ -20,11 +20,13 @@ import tensorrt as trt

sys.path.extend(['..', '../AIlib2'])
from AI import AI_process, get_postProcess_para, get_postProcess_para_dic, AI_det_track, AI_det_track_batch
from stdc import stdcModel
from utilsK.jkmUtils import pre_process, post_process, get_return_data
from obbUtils.shipUtils import OBB_infer, OBB_tracker, draw_obb, OBB_tracker_batch
from obbUtils.load_obb_model import load_model_decoder_OBB
from trackUtils.sort import Sort
from trackUtils.sort_obb import OBB_Sort
from DMPR import DMPRModel

FONT_PATH = "../AIlib2/conf/platech.ttf"

@@ -43,8 +45,10 @@ class Model:
model = runtime.deserialize_cuda_engine(f.read())
Segweights = par['Segweights']
if Segweights:
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
segmodel = runtime.deserialize_cuda_engine(f.read())
if modeType.value[3] == 'cityMangement2':
segmodel = DMPRModel(weights=par['Segweights'], par = par['segPar'])
else:
segmodel = stdcModel(weights=par['Segweights'], par = par['segPar'])
else:
segmodel = None
trackPar = par['trackPar']
