
Upload

tags/V2.8.3^2^2
chenyukun 1 year ago
parent commit 79546134f9
13 changed files with 510 additions and 294 deletions
  1. +96 -25 concurrency/FileUploadThread.py
  2. +0 -1 concurrency/IntelligentRecognitionProcess.py
  3. +145 -101 concurrency/PushVideoStreamProcess.py
  4. +142 -126 concurrency/PushVideoStreamProcess2.py
  5. +2 -2 config/service/dsp_dev_service.yml
  6. +3 -4 config/service/dsp_prod_service.yml
  7. +3 -4 config/service/dsp_test_service.yml
  8. +21 -11 enums/ModelTypeEnum.py
  9. +24 -12 enums/ModelTypeEnum2.py
  10. +40 -0 start.sh
  11. +19 -0 stop.sh
  12. +9 -6 util/ModelUtils.py
  13. +6 -2 util/ModelUtils2.py

+96 -25 concurrency/FileUploadThread.py



@staticmethod
def handle_image(frame_msg, frame_step):
det_xywh, frame, current_frame, all_frames, font_config = frame_msg
# (high_score_image["code"], all_frames, draw_config["font_config"])
# high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
det_result, all_frames, font_config = frame_msg
'''
det_xywh:{
'code':{
'''
model_info = []
# 根据模型编码解析数据
for code, det_list in det_xywh.items():
for code, det_list in det_result.items():
if len(det_list) > 0:
for cls, target_list in det_list.items():
if len(target_list) > 0:
frame, iframe, cls_list = target_list
if len(cls_list) > 0:
aFrame = frame.copy()
for target in target_list:
for target in cls_list:
# detect_targets_code, box, score, label_array, color
draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config)
model_info.append({"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame})
model_info.append({
"or_frame": frame,
"modelCode": str(code),
"detectTargetCode": str(cls),
"aFrame": aFrame,
"current_frame": iframe,
"last_frame": iframe + frame_step
})
if len(model_info) > 0:
image_result = {
"or_frame": frame,
"model_info": model_info,
"current_frame": current_frame,
"last_frame": current_frame + frame_step
}
return image_result
return model_info
return None


# @staticmethod
# def handle_image(frame_msg, frame_step):
# # (high_score_image["code"], all_frames, draw_config["font_config"])
# # high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
# det_xywh, frame, current_frame, all_frames, font_config = frame_msg
# '''
# det_xywh:{
# 'code':{
# 1: [[detect_targets_code, box, score, label_array, color]]
# }
# }
# 模型编号:modeCode
# 检测目标:detectTargetCode
# '''
# model_info = []
# # 更加模型编码解析数据
# for code, det_list in det_xywh.items():
# if len(det_list) > 0:
# for cls, target_list in det_list.items():
# if len(target_list) > 0:
# aFrame = frame.copy()
# for target in target_list:
# # detect_targets_code, box, score, label_array, color
# draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config)
# model_info.append({"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame})
# if len(model_info) > 0:
# image_result = {
# "or_frame": frame,
# "model_info": model_info,
# "current_frame": current_frame,
# "last_frame": current_frame + frame_step
# }
# return image_result
# return None

def run(self):
msg, context = self._msg, self._context
service = context["service"]
if image_msg[0] == 1:
image_result = self.handle_image(image_msg[1], frame_step)
if image_result is not None:
task = []
or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
or_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
analyse_type,
"OR", "0", "0", request_id)
or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
task.append(or_future)
model_info_list = image_result["model_info"]
msg_list = []
for model_info in model_info_list:
task, msg_list = [], []
for model_info in image_result:
or_image = cv2.imencode(".jpg", model_info["or_frame"])[1]
or_image_name = build_image_name(model_info["current_frame"],
model_info["last_frame"],
analyse_type,
"OR", "0", "0", request_id)
or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
task.append(or_future)
ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
ai_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
ai_image_name = build_image_name(model_info["current_frame"],
model_info["last_frame"],
analyse_type,
"AI",
model_info["modelCode"],
for msg in msg_list:
put_queue(fb_queue, msg, timeout=2, is_ex=False)
del task, msg_list
# task = []
# or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
# or_image_name = build_image_name(image_result["current_frame"],
# image_result["last_frame"],
# analyse_type,
# "OR", "0", "0", request_id)
# or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
# task.append(or_future)
# model_info_list = image_result["model_info"]
# msg_list = []
# for model_info in model_info_list:
# ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
# ai_image_name = build_image_name(image_result["current_frame"],
# image_result["last_frame"],
# analyse_type,
# "AI",
# model_info["modelCode"],
# model_info["detectTargetCode"],
# request_id)
# ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
# ai_image.tobytes())
# task.append(ai_future)
# msg_list.append(message_feedback(request_id,
# AnalysisStatus.RUNNING.value,
# analyse_type, "", "", "",
# or_image_name,
# ai_image_name,
# model_info['modelCode'],
# model_info['detectTargetCode']))
# for tk in task:
# tk.result()
# for msg in msg_list:
# put_queue(fb_queue, msg, timeout=2, is_ex=False)
# del task, msg_list
else:
sleep(1)
del image_msg
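Note: handle_image now returns a flat list of per-detection dicts instead of one image_result dict, so each entry carries its own original frame and frame range. A minimal sketch (not project code; build_name and the sample values are illustrative stand-ins for build_image_name and real frames) of how such a list can be turned into OR/AI object names for upload:

def build_name(current_frame, last_frame, analyse_type, kind, model_code, target_code, request_id):
    # hypothetical stand-in for the project's build_image_name()
    return "%s_%s_%s_%s_%s_%s_%s.jpg" % (request_id, analyse_type, kind, model_code,
                                         target_code, current_frame, last_frame)

def collect_upload_names(model_info_list, analyse_type="demo", request_id="req-1"):
    names = []
    for info in model_info_list:
        # each entry now carries its own frame range, so OR/AI images are named per detection
        or_name = build_name(info["current_frame"], info["last_frame"], analyse_type,
                             "OR", "0", "0", request_id)
        ai_name = build_name(info["current_frame"], info["last_frame"], analyse_type,
                             "AI", info["modelCode"], info["detectTargetCode"], request_id)
        names.append((or_name, ai_name))
    return names

sample = [{"or_frame": None, "aFrame": None, "modelCode": "16", "detectTargetCode": "3",
           "current_frame": 120, "last_frame": 420}]
print(collect_upload_names(sample))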

+0 -1 concurrency/IntelligentRecognitionProcess.py

import os
from concurrent.futures import ThreadPoolExecutor
from os.path import join, exists, getsize
from profile import Profile
from time import time, sleep
from traceback import format_exc



+145 -101 concurrency/PushVideoStreamProcess.py

for i, frame in enumerate(frame_list):
# 复制帧用来画图
copy_frame = frame.copy()
det_xywh, code_list, thread_p = {}, {}, []
det_xywh, thread_p = {}, []
for det in push_objs[i]:
rr = t.submit(self.handle_image, det_xywh, det, frame_score, copy_frame,
draw_config, code_list)
thread_p.append(rr)
code, det_result = det
# 每个单独模型处理
# 模型编号、100帧的所有问题, 检测目标、颜色、文字图片
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
ai_video_file, ai_write_status, request_id)
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status, request_id)
# 指定多少帧上传一次问题
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# 指定帧内处理逻辑
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# 所有的模型编号
cache_codes = set(high_score_image["code"].keys())
# 遍历当前模型
for det_code, det_clss in det_xywh.items():
# 如果模型编号不在缓存中
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# 如果模型编号在缓存中, 检查检测目标的情况
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# 如果有问题, 走下面的逻辑
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
if picture_similarity and len(high_score_image) > 0:
# 比较上一次识别到问题的帧和当前问题帧的相似度
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])), timeout=2)
# 如果缓存问题是空的,给缓存添加问题
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_stream_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
or_video_file = write_or_video_result.result(timeout=5)
# 复制帧用来画图
copy_frame = frame.copy()
# 所有问题记录字典
det_xywh = {}
code_list = {}
# 每帧可能存在多模型,多模型问题处理
thread_p = []
det_xywh, thread_p = {}, []
for det in push_objs[i]:
rr = t.submit(self.handle_image, det_xywh, det, frame_score, copy_frame,
draw_config, code_list)
thread_p.append(rr)
code, det_result = det
# 每个单独模型处理
# 模型编号、100帧的所有问题, 检测目标、颜色、文字图片
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
ai_write_status, request_id)
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status, request_id)
# 指定多少帧上传一次问题
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# 指定帧内处理逻辑
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# 所有的模型编号
cache_codes = set(high_score_image["code"].keys())
# 遍历当前模型
for det_code, det_clss in det_xywh.items():
# 如果模型编号不在缓存中
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# 如果模型编号在缓存中, 检查检测目标的情况
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
# 如果检测目标在缓存中
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# 如果有问题, 走下面的逻辑
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
if picture_similarity and len(high_score_image) > 0:
# 比较上一次识别到问题的帧和当前问题帧的相似度
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])), timeout=2)
# 如果缓存问题是空的,给缓存添加问题
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_stream_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
# 接收停止指令
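Note: the new blocks above replace the old per-frame upload with a high_score_image cache: within a frame_step window they keep, per model code and class, the detection list with the most boxes (or uniformly higher scores on a tie), then flush the cache to image_queue once the window is exceeded. A condensed sketch of that policy (simplified from the diff; flush is a stand-in for put_queue(image_queue, ...)):

def update_high_score_cache(cache, det_xywh, frame, frame_index, frame_step, flush):
    if cache.get("current_frame") is None:
        cache["current_frame"] = frame_index
    diff_frame_num = frame_index - cache["current_frame"]
    if frame_step >= diff_frame_num and cache.get("code") is not None and len(det_xywh) > 0:
        for det_code, det_clss in det_xywh.items():
            cached = cache["code"].setdefault(det_code, {})
            for cls, cls_list in det_clss.items():
                if len(cls_list) == 0:
                    continue
                old = cached.get(cls)
                if old is None or len(cls_list) > len(old[2]):
                    cached[cls] = (frame, frame_index, cls_list)
                elif len(cls_list) == len(old[2]):
                    # entries are [cls, box, score, label_array, color]; index 2 is the score
                    if all(s[2] >= o[2] for s, o in zip(cls_list, old[2])):
                        cached[cls] = (frame, frame_index, cls_list)
    if diff_frame_num > frame_step and cache.get("code") is not None:
        flush(cache["code"])  # the real code does put_queue(image_queue, (1, [...]))
        cache["code"], cache["current_frame"] = None, None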

+142 -126 concurrency/PushVideoStreamProcess2.py

logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
self._context["logo"] = logo


@staticmethod
def handle_image(det_xywh, det, frame_score, copy_frame, draw_config, code_list):
code, det_result = det
# 每个单独模型处理
# 模型编号、100帧的所有问题, 检测目标、颜色、文字图片
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
# box, img, label_array, score=0.5, color=None, config=None
draw_painting_joint(box, copy_frame, label_array, score, color, font_config)
if det_xywh.get(code) is None:
det_xywh[code], code_list[code] = {}, {}
cd = det_xywh[code].get(cls)
if cd is None:
code_list[code][cls] = 1
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
code_list[code][cls] += 1
det_xywh[code][cls].append([cls, box, score, label_array, color])



class OnPushStreamProcess2(PushStreamProcess2):
__slots__ = ()
# 复制帧用来画图
copy_frame = frame.copy()
# 所有问题记录字典
det_xywh, code_list, thread_p = {}, {}, []
det_xywh, thread_p = {}, []
# [模型1识别数组, 模型2识别数组, 模型3识别数组]
for s_det_list in push_objs:
code, det_list = s_det_list
rr = t.submit(self.handle_image, det_xywh, (code, det_list[i]), frame_score,
copy_frame,
draw_config, code_list)
thread_p.append(rr)
code, det_result = s_det_list[0], s_det_list[1][i]
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
# 指定多少帧上传一次问题
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# 指定帧内处理逻辑
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# 所有的模型编号
cache_codes = set(high_score_image["code"].keys())
# 遍历当前模型
for det_code, det_clss in det_xywh.items():
# 如果模型编号不在缓存中
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# 如果模型编号在缓存中, 检查检测目标的情况
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
# 如果检测目标在缓存中
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# 如果有问题, 走下面的逻辑
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
if picture_similarity and len(high_score_image) > 0:
# 比较上一次识别到问题的帧和当前问题帧的相似度
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])))
# 如果缓存问题是空的,给缓存添加问题
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_p_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
or_video_file = write_or_video_result.result(timeout=5)
# 复制帧用来画图
copy_frame = frame.copy()
# 所有问题记录字典
det_xywh, code_list, thread_p = {}, {}, []
det_xywh, thread_p = {}, []
for s_det_list in push_objs:
code, det_list = s_det_list
rr = t.submit(self.handle_image, det_xywh, (code, det_list[i]), frame_score,
copy_frame, draw_config, code_list)
thread_p.append(rr)
code, det_result = s_det_list[0], s_det_list[1][i]
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
# 指定多少帧上传一次问题
if high_score_image.get("current_frame") is None:
high_score_image["current_frame"] = frame_index_list[i]
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
# 指定帧内处理逻辑
if frame_step >= diff_frame_num and high_score_image.get("code") is not None and len(det_xywh) > 0:
# 所有的模型编号
cache_codes = set(high_score_image["code"].keys())
# 遍历当前模型
for det_code, det_clss in det_xywh.items():
# 如果模型编号不在缓存中
if det_code not in cache_codes:
high_score_image["code"][det_code] = {}
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
# 如果模型编号在缓存中, 检查检测目标的情况
cache_clss = set(high_score_image["code"][det_code].keys())
for cls, cls_list in det_xywh[det_code].items():
if len(cls_list) > 0:
# 如果检测目标在缓存中
if cls not in cache_clss:
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
else:
if len(det_xywh[det_code][cls]) > len(high_score_image["code"][det_code][cls][2]):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
elif len(det_xywh[det_code][cls]) == len(high_score_image["code"][det_code][cls][2]):
# [cls, box, score, label_array, color]
hs = 0
for ii, s in enumerate(cls_list):
if s[2] >= high_score_image["code"][det_code][cls][2][ii][2]:
hs += 1
if hs == len(cls_list):
high_score_image["code"][det_code][cls] = (frame, frame_index_list[i], cls_list)
if diff_frame_num > frame_step:
if high_score_image.get("code") is not None:
high_score_image["or_frame"] = frame
put_queue(image_queue, (1, [high_score_image["code"], all_frames, draw_config["font_config"]]))
high_score_image["code"] = None
high_score_image["current_frame"] = None
# 如果有问题, 走下面的逻辑
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
if picture_similarity and len(high_score_image) > 0:
# 比较上一次识别到问题的帧和当前问题帧的相似度
if picture_similarity and high_score_image.get("or_frame") is not None:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])))
# 如果缓存问题是空的,给缓存添加问题
if flag and high_score_image.get("code") is None:
high_score_image["code"] = {}
for code, det_list in det_xywh.items():
if high_score_image["code"].get(code) is None:
high_score_image["code"][code] = {}
for cls, cls_list in det_list.items():
high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
push_p = push_p_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
# 接收停止指令
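Note: both push-stream variants gate uploads on picture similarity: ImageUtils.dHash of the last uploaded frame and of the current frame are compared by Hamming distance, and similarity_1 = 1 - dist / 64 must stay below the configured similarity threshold before a new upload. An illustrative dHash/Hamming pair consistent with those calls (the project's own ImageUtils implementation may differ):

import cv2

def dhash(frame, size=8):
    # difference hash: 64 bits from adjacent-pixel comparisons on an 8x9 grayscale thumbnail
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (size + 1, size))
    bits = (small[:, 1:] > small[:, :-1]).flatten()
    return "".join("1" if b else "0" for b in bits)

def hamming_distance(h1, h2):
    return sum(c1 != c2 for c1, c2 in zip(h1, h2))

def too_similar(prev_frame, frame, similarity=0.65):
    # skip uploading when the new frame is nearly identical to the previously uploaded one
    dist = hamming_distance(dhash(prev_frame), dhash(frame))
    return (1 - dist / 64.0) >= similarity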

+2 -2 config/service/dsp_dev_service.yml

frame_score: 0.4
# 图片相似度过滤
picture_similarity: true
similarity: 1
frame_step: 160
similarity: 0.65
frame_step: 300
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000

+3 -4 config/service/dsp_prod_service.yml

# 是否添加水印
video_add_water: false
service:

filter:
# 图片得分多少分以上返回图片
frame_score: 0.4
# 图片相似度过滤
picture_similarity: true
similarity: 1
frame_step: 160
similarity: 0.65
frame_step: 300
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
# 使用哪种识别方式
# 1 普通方式
# 2 模型追踪
model_type: 2
model_type: 1
limit: 3
task:
# 任务限制5个

+3 -4 config/service/dsp_test_service.yml

# 是否添加水印
video_add_water: false
service:

filter:
# 图片得分多少分以上返回图片
frame_score: 0.4
# 图片相似度过滤
picture_similarity: true
similarity: 1
frame_step: 160
similarity: 0.65
frame_step: 300
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
# 使用哪种识别方式
# 1 普通方式
# 2 模型追踪
model_type: 2
model_type: 1
limit: 3
task:
# 任务限制5个

+21 -11 enums/ModelTypeEnum.py

from common.Constant import COLOR


sys.path.extend(['..', '../AIlib2'])
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo
from segutils.segmodel import SegModel
from utilsK.queRiver import riverDetSegMixProcess
from segutils.trafficUtils import tracfficAccidentMixFunction
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName
})


CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement', lambda device, gpuName: {
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["车辆", "垃圾", "商贩"],
'seg_nclass': 2, # 分割模型类别数目,默认2类
'segRegionCnt': 0,
'slopeIndex': [],
'labelnames': ["车辆", "垃圾", "商贩", "违停"],
'seg_nclass': 4, # 分割模型类别数目,默认2类
'segRegionCnt': 2,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/cityMangement/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'trtFlag_seg': True,
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName,
'segPar': {
'depth_factor': 32,
'NUM_FEATURE_MAP_CHANNEL': 6,
'dmpr_thresh': 0.3,
'dmprimg_size': 640,
'mixFunction': {
'function': dmpr_yolo,
'pars': {'carCls': 0, 'illCls': 3}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"ovlap_thres_crossCategory": 0.6,
"classes": 2,
"classes": 5,
"rainbows": COLOR "rainbows": COLOR
}, },
'Segweights': None
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName
})


DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {


EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}



# 模型分析方式
@unique
class ModelMethodTypeEnum(Enum):
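Note: each enum value above is a tuple (id, code, display name, package, param factory); the factory is called with the device and GPU name to build the model's parameter dict, which is what this commit changes for cityMangement2 (four label names, DMPR segmentation weights, classes: 5). A stripped-down, self-contained mimic of that pattern (values copied from the visible entry for illustration only):

from enum import Enum, unique

@unique
class MiniModelType(Enum):
    CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", "cityMangement2",
                            lambda device, gpuName: {
                                "device": device,
                                "labelnames": ["车辆", "垃圾", "商贩", "违停"],
                                "Detweights": "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName,
                                "Segweights": "../AIlib2/weights/cityMangement2/dmpr_%s.engine" % gpuName,
                            })

entry = MiniModelType.CITY_MANGEMENT_MODEL.value
params = entry[4]("cuda:0", "2080Ti")  # device and gpuName are illustrative values
print(entry[3], params["labelnames"], params["Segweights"])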

+24 -12 enums/ModelTypeEnum2.py

from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.illParkingUtils import illParking_postprocess
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo


'''
参数说明
}
})


CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement', lambda device, gpuName: {
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "垃圾", "商贩"],
'labelnames': ["车辆", "垃圾", "商贩", "违停"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/cityMangement/yolov5_%s_fp16.engine" % gpuName,
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'depth_factor': 32,
'NUM_FEATURE_MAP_CHANNEL': 6,
'dmpr_thresh': 0.3,
'dmprimg_size': 640,
'mixFunction': {
'function': dmpr_yolo,
'pars': {'carCls': 0, 'illCls': 3}
}
},
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"ovlap_thres_crossCategory": 0.6,
"classes": 2,
"classes": 5,
"rainbows": COLOR "rainbows": COLOR
}, },
'txtFontSize': 40,
'txtFontSize': 20,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
'waterLineWidth': 2
}
})


'RGB_convert_first': True,
'mixFunction': {
'function': mixNoParking_road_postprocess,
'pars': {'modelSize': (640, 360), 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5,
'RoadArea': 16000}
}
},


EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}



# 模型分析方式
@unique
class ModelMethodTypeEnum2(Enum):

+40 -0 start.sh

#!/bin/bash
current_dir=$(cd "$(dirname "$0")"; pwd)
active=$(basename "$(dirname "$current_dir")")
conda_env="alg"
echo "当前程序所在目录: $current_dir, 当前程序启动环境: $active"
if [[ "a${active}" != "adev" && "a${active}" != "atest" && "a${active}" != "aprod" ]]; then
echo "###############################################################";
echo "启动失败, 当前环境只支持dev、test、prod";
echo "环境是根据程序所在目录自动匹配的, 请检测程序路径配置是否正确!";
echo "###############################################################";
exit 1
fi
cd $current_dir
pid=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid" ]; then
echo "alg进程已存在, 进程id: $pid"
kill -9 ${pid};
echo "杀掉当前alg进程, 进程号:$pid"
sleep 1
pid_1=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid_1" ]; then
echo "###############################################################";
echo "杀掉alg进程失败!"
echo "###############################################################";
exit 1
else
echo "杀掉alg进程成功!!"
fi
fi
nohup /home/th/anaconda3/envs/${conda_env}/bin/python3.8 dsp_master.py ${active} > /dev/null 2>&1 &
sleep 1
pid_end=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid_end" ]; then
echo "alg启动成功, $pid_end"
else
echo "###############################################################";
echo "alg启动失败!!!!!!!!!!!!!!!!!!!!!!"
echo "###############################################################";
exit 1
fi

+19 -0 stop.sh

#!/bin/bash
conda_env="alg"
pid=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid" ]; then
kill -9 ${pid};
echo "杀掉当前alg进程, 进程号:$pid"
fi
sleep 1
pid_end=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'`
if [ -n "$pid_end" ]; then
echo "###############################################################";
echo "alg停止失败!!!!!, $pid_end"
echo "###############################################################";
exit 1
else
echo "###############################################################";
echo "alg停止成功!!!!!!!!!!!!!!!!!!!!!!"
echo "###############################################################";
fi

+9 -6 util/ModelUtils.py



sys.path.extend(['..', '../AIlib2'])
from AI import AI_process, AI_process_forest, get_postProcess_para, ocr_process
from stdc import stdcModel
from segutils.segmodel import SegModel
from models.experimental import attempt_load
from obbUtils.shipUtils import OBB_infer
import torch
import tensorrt as trt
from utilsK.jkmUtils import pre_process, post_process, get_return_data
from DMPR import DMPRModel
FONT_PATH = "../AIlib2/conf/platech.ttf"




Detweights = par['Detweights']
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
model = runtime.deserialize_cuda_engine(f.read())
par['segPar']['seg_nclass'] = par['seg_nclass']
Segweights = par['Segweights']
if Segweights:
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
segmodel = runtime.deserialize_cuda_engine(f.read())
if modeType.value[3] == 'cityMangement2':
segmodel = DMPRModel(weights=Segweights, par=par['segPar'])
else:
segmodel = stdcModel(weights=Segweights, par=par['segPar'])
else:
segmodel = None
objectPar = {
lambda x: model_process(x)),
# 城管模型
ModelType.CITY_MANGEMENT_MODEL.value[1]: (
lambda x, y, r, t, z, h: TwoModel(x, y, r, ModelType.CITY_MANGEMENT_MODEL, t, z, h),
lambda x, y, r, t, z, h: OneModel(x, y, r, ModelType.CITY_MANGEMENT_MODEL, t, z, h),
ModelType.CITY_MANGEMENT_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: forest_process(x)
lambda x: model_process(x)
),
# 人员落水模型
ModelType.DROWING_MODEL.value[1]: (
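Note: the table above maps a model code to a (constructor, ModelType, label handler, post-process handler) tuple; this commit switches the cityMangement entry from TwoModel/forest_process to OneModel/model_process. A minimal stand-alone sketch of that dispatch-table pattern (all callables here are stand-ins, not the repo's classes):

def one_model(*args):  # stand-in for OneModel(...)
    return ("OneModel", args)

def one_label(x, y, z):  # stand-in for the label handler
    return (x, y, z)

def model_process(x):  # stand-in for the post-process handler
    return x

MODEL_TABLE = {
    "016": (lambda x, y, r, t, z, h: one_model(x, y, r, "CITY_MANGEMENT_MODEL", t, z, h),
            "CITY_MANGEMENT_MODEL",
            one_label,
            model_process),
}

ctor, model_type, label_fn, process_fn = MODEL_TABLE["016"]
model = ctor(1, 2, 3, 4, 5, 6)
print(model_type, model[0])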

+6 -2 util/ModelUtils2.py



sys.path.extend(['..', '../AIlib2'])
from AI import AI_process, get_postProcess_para, get_postProcess_para_dic, AI_det_track, AI_det_track_batch
from stdc import stdcModel
from utilsK.jkmUtils import pre_process, post_process, get_return_data
from obbUtils.shipUtils import OBB_infer, OBB_tracker, draw_obb, OBB_tracker_batch
from obbUtils.load_obb_model import load_model_decoder_OBB
from trackUtils.sort import Sort
from trackUtils.sort_obb import OBB_Sort
from DMPR import DMPRModel


FONT_PATH = "../AIlib2/conf/platech.ttf"


model = runtime.deserialize_cuda_engine(f.read())
Segweights = par['Segweights']
if Segweights:
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
segmodel = runtime.deserialize_cuda_engine(f.read())
if modeType.value[3] == 'cityMangement2':
segmodel = DMPRModel(weights=par['Segweights'], par=par['segPar'])
else:
segmodel = stdcModel(weights=par['Segweights'], par=par['segPar'])
else:
segmodel = None
trackPar = par['trackPar']
