
1. Added several new models; 2. Fixed the bug where issues were still reported even when there were no new problem tickets.

develop
YAO, 8 months ago
parent commit 2c4a959c3b
66 changed files with 827 additions and 538 deletions
  1. BIN       common/__pycache__/Constant.cpython-38.pyc
  2. BIN       common/__pycache__/YmlConstant.cpython-38.pyc
  3. BIN       common/__pycache__/__init__.cpython-38.pyc
  4. +20 -3    concurrency/IntelligentRecognitionProcess.py
  5. +5 -2     concurrency/PullStreamThread.py
  6. +46 -28   concurrency/PushVideoStreamProcess.py
  7. +60 -105  concurrency/PushVideoStreamProcess2.py
  8. BIN       concurrency/__pycache__/CommonThread.cpython-38.pyc
  9. BIN       concurrency/__pycache__/FeedbackThread.cpython-38.pyc
  10. BIN      concurrency/__pycache__/FileUploadThread.cpython-38.pyc
  11. BIN      concurrency/__pycache__/HeartbeatThread.cpython-38.pyc
  12. BIN      concurrency/__pycache__/IntelligentRecognitionProcess.cpython-38.pyc
  13. BIN      concurrency/__pycache__/IntelligentRecognitionProcess2.cpython-38.pyc
  14. BIN      concurrency/__pycache__/Pull2PushStreamProcess.cpython-38.pyc
  15. BIN      concurrency/__pycache__/Pull2PushStreamThread.cpython-38.pyc
  16. BIN      concurrency/__pycache__/PullStreamThread.cpython-38.pyc
  17. BIN      concurrency/__pycache__/PullVideoStreamProcess.cpython-38.pyc
  18. BIN      concurrency/__pycache__/PullVideoStreamProcess2.cpython-38.pyc
  19. BIN      concurrency/__pycache__/PushVideoStreamProcess.cpython-38.pyc
  20. BIN      concurrency/__pycache__/PushVideoStreamProcess2.cpython-38.pyc
  21. BIN      concurrency/__pycache__/RecordingHeartbeatThread.cpython-38.pyc
  22. BIN      concurrency/__pycache__/__init__.cpython-38.pyc
  23. BIN      entity/__pycache__/FeedBack.cpython-38.pyc
  24. BIN      entity/__pycache__/__init__.cpython-38.pyc
  25. +150 -25 enums/ModelTypeEnum.py
  26. +337 -301 enums/ModelTypeEnum2.py
  27. BIN      enums/__pycache__/AnalysisStatusEnum.cpython-38.pyc
  28. BIN      enums/__pycache__/AnalysisTypeEnum.cpython-38.pyc
  29. BIN      enums/__pycache__/BaiduSdkEnum.cpython-38.pyc
  30. BIN      enums/__pycache__/ExceptionEnum.cpython-38.pyc
  31. BIN      enums/__pycache__/ModelTypeEnum.cpython-38.pyc
  32. BIN      enums/__pycache__/ModelTypeEnum2.cpython-38.pyc
  33. BIN      enums/__pycache__/RecordingStatusEnum.cpython-38.pyc
  34. BIN      enums/__pycache__/StatusEnum.cpython-38.pyc
  35. BIN      enums/__pycache__/__init__.cpython-38.pyc
  36. BIN      exception/__pycache__/CustomerException.cpython-38.pyc
  37. BIN      exception/__pycache__/__init__.cpython-38.pyc
  38. +1 -1    service/Dispatcher.py
  39. BIN      service/__pycache__/Dispatcher.cpython-38.pyc
  40. BIN      service/__pycache__/__init__.cpython-38.pyc
  41. +99 -39  util/ModelUtils.py
  42. +26 -32  util/ModelUtils2.py
  43. +83 -2   util/PlotsUtils.py
  44. BIN      util/__pycache__/AliyunSdk.cpython-38.pyc
  45. BIN      util/__pycache__/CpuUtils.cpython-38.pyc
  46. BIN      util/__pycache__/Cv2Utils.cpython-38.pyc
  47. BIN      util/__pycache__/FileUtils.cpython-38.pyc
  48. BIN      util/__pycache__/GPUtils.cpython-38.pyc
  49. BIN      util/__pycache__/ImageUtils.cpython-38.pyc
  50. BIN      util/__pycache__/ImgBaiduSdk.cpython-38.pyc
  51. BIN      util/__pycache__/KafkaUtils.cpython-38.pyc
  52. BIN      util/__pycache__/LogUtils.cpython-38.pyc
  53. BIN      util/__pycache__/ModelUtils.cpython-38.pyc
  54. BIN      util/__pycache__/ModelUtils2.cpython-38.pyc
  55. BIN      util/__pycache__/OcrBaiduSdk.cpython-38.pyc
  56. BIN      util/__pycache__/PlotsUtils.cpython-38.pyc
  57. BIN      util/__pycache__/PushStreamUtils.cpython-38.pyc
  58. BIN      util/__pycache__/QueUtil.cpython-38.pyc
  59. BIN      util/__pycache__/RWUtils.cpython-38.pyc
  60. BIN      util/__pycache__/TimeUtils.cpython-38.pyc
  61. BIN      util/__pycache__/TorchUtils.cpython-38.pyc
  62. BIN      util/__pycache__/__init__.cpython-38.pyc
  63. BIN      vodsdk/__pycache__/AliyunVodUploader.cpython-38.pyc
  64. BIN      vodsdk/__pycache__/AliyunVodUtils.cpython-38.pyc
  65. BIN      vodsdk/__pycache__/UploadVideoRequest.cpython-38.pyc
  66. BIN      vodsdk/__pycache__/__init__.cpython-38.pyc

BIN  common/__pycache__/Constant.cpython-38.pyc
BIN  common/__pycache__/YmlConstant.cpython-38.pyc
BIN  common/__pycache__/__init__.cpython-38.pyc


+20 -3  concurrency/IntelligentRecognitionProcess.py

@@ -9,6 +9,8 @@ from traceback import format_exc
import cv2

from multiprocessing import Process, Queue

import numpy as np
from loguru import logger

from common.Constant import init_progess, success_progess
@@ -121,7 +123,13 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
def ai_normal_dtection(model, frame, request_id):
model_conf, code = model
retResults = MODEL_CONFIG[code][3]([model_conf, frame, request_id])[0]
return code, retResults[2]
if type(retResults) is np.ndarray or len(retResults) == 0:
ret = retResults
if type(retResults) is np.ndarray:
ret = retResults.tolist()
else:
ret = retResults[2]
return code, ret

@staticmethod
def obj_det(self, model_array, frame, task_status, cframe, tt, request_id):
@@ -256,6 +264,8 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
draw_config[code]["allowedList"] = model_conf[2]
draw_config[code]["rainbows"] = model_conf[4]
draw_config[code]["label_arrays"] = model_param['label_arraylist']
if "label_dict" in model_param:
draw_config[code]["label_dict"] = model_param['label_dict']
# Process frames with multiple threads; testing showed two threads work best
det_array = []
for i, frame in enumerate(frame_list):
@@ -390,8 +400,13 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
def ai_normal_dtection(model, frame, request_id):
model_conf, code = model
retResults = MODEL_CONFIG[code][3]([model_conf, frame, request_id])[0]
# [float(cls_c), xc,yc,w,h, float(conf_c)]
return code, retResults[2]
if type(retResults) is np.ndarray or len(retResults) == 0:
ret = retResults
if type(retResults) is np.ndarray:
ret = retResults.tolist()
else:
ret = retResults[2]
return code, ret

@staticmethod
def obj_det(self, model_array, frame, task_status, cframe, tt, request_id):
@@ -539,6 +554,8 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
draw_config[code]["allowedList"] = model_conf[2]
draw_config[code]["rainbows"] = model_conf[4]
draw_config[code]["label_arrays"] = model_param['label_arraylist']
if "label_dict" in model_param:
draw_config[code]["label_dict"] = model_param['label_dict']
det_array = []
for i, frame in enumerate(frame_list):
det_result = t.submit(self.obj_det, self, model_array, frame, task_status,
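
For readers of the two ai_normal_dtection hunks above: the new branch normalizes whatever the model returns before handing it on. A minimal stand-alone sketch of that intent, assuming the three result shapes visible in the diff (a numpy array, an empty result, or a tuple whose third element holds the detections):

    import numpy as np

    def normalize_det_result(ret_results):
        """Return a plain list of detections regardless of the raw result shape (assumed from the diff)."""
        if isinstance(ret_results, np.ndarray):
            return ret_results.tolist()       # ndarray -> list so it can be queued/serialized later
        if len(ret_results) == 0:
            return ret_results                # nothing detected, pass the empty result through
        return ret_results[2]                 # legacy shape: detections sit at index 2

    # usage sketch
    print(normalize_det_result(np.zeros((1, 6))))     # [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
    print(normalize_det_result([]))                   # []
    print(normalize_det_result(("meta", "meta", [[1, 2]])))  # [[1, 2]]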

+5 -2  concurrency/PullStreamThread.py

@@ -69,7 +69,9 @@ class RecordingPullStreamThread(PullStreamThread):
if check_video_stream(width, height):
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
# For an offline address: if the stream still cannot be pulled after retrying, close the pull pipe and return a failure
if cv2_init_num > 3:
# changed to a 5-minute wait window instead
# if cv2_init_num > 3:
if time() - start_time > 300:
logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id)
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
@@ -78,7 +80,8 @@ class RecordingPullStreamThread(PullStreamThread):
if width is not None:
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RUNNING.value[0]}, timeout=2)
else:
if cv2_init_num < 2:
# if cv2_init_num < 2:
if time() - start_time < 300:
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RETRYING.value[0]}, timeout=2)
continue
# for offline video, when the queue is full, wait 1 second and retry
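
The hunk above replaces the fixed three-retry limit with a five-minute wait window before the offline pull is declared failed. A stand-alone sketch of that retry pattern; the 300-second budget comes from the diff, the helper name and callback are illustrative:

    from time import time, sleep

    RETRY_WINDOW_SECONDS = 300  # 5 minutes, as in the diff

    def pull_with_time_budget(open_stream, window=RETRY_WINDOW_SECONDS):
        """Keep retrying open_stream() until it returns a stream or the time budget is spent."""
        start = time()
        attempt = 0
        while True:
            attempt += 1
            stream = open_stream()
            if stream is not None:
                return stream
            if time() - start > window:
                raise TimeoutError(f"could not open stream after {attempt} attempts / {window}s")
            sleep(1)  # brief pause before the next attempt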

+46 -28  concurrency/PushVideoStreamProcess.py

@@ -15,13 +15,14 @@ import psutil
from loguru import logger

from enums.ExceptionEnum import ExceptionType
from enums.ModelTypeEnum import ModelType
from exception.CustomerException import ServiceException
from util import ImageUtils
from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push_video_stream, close_all_p
from util.ImageUtils import url2Array, add_water_pic
from util.LogUtils import init_log

from util.PlotsUtils import draw_painting_joint, filterBox, xywh2xyxy2
from util.PlotsUtils import draw_painting_joint, filterBox, xywh2xyxy2, draw_name_joint

from util.QueUtil import get_no_block_queue, put_queue, clear_queue

@@ -130,26 +131,33 @@ class OnPushStreamProcess(PushStreamProcess):
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
try: # guard against NaN values in the box
box, score, cls = xywh2xyxy2(qs)
except:
continue
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2:
rr = t.submit(draw_name_joint, box, copy_frame, draw_config[code]["label_dict"], score, color, font_config, qs[6])
else:
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li))
if not (ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2):
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li))
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -184,14 +192,17 @@ class OnPushStreamProcess(PushStreamProcess):
qs_np_tmp[:, 11] += 1
qs_np_tmp = np.delete(qs_np_tmp, np.where((qs_np_tmp[:, 11] >= 75))[0], axis=0)
has = False
new_lab = []
for j in qs_reurn:
if j[11] == 1:
has = True
break
new_lab.append(j[9])
if has:
for q in qs_reurn:
if q[11] >= 1:
cls = int(q[9])
if not (cls in new_lab):
continue # prevent other classes from being carried along in the report
code = str(int(q[10])).zfill(3)
if det_xywh2.get(code) is None:
det_xywh2[code] = {}
@@ -325,22 +336,26 @@ class OffPushStreamProcess(PushStreamProcess):
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2:
rr = t.submit(draw_name_joint, box, copy_frame, draw_config[code]["label_dict"], score, color, font_config, qs[6])
else:
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config)
thread_p.append(rr)
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li))
if not (ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2):
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li))
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -373,14 +388,17 @@ class OffPushStreamProcess(PushStreamProcess):
qs_np_tmp[:, 11] += 1
qs_np_tmp = np.delete(qs_np_tmp, np.where((qs_np_tmp[:, 11] >= 75))[0], axis=0)
has = False
new_lab = []
for j in qs_reurn:
if j[11] == 1:
has = True
break
new_lab.append(j[9])
if has:
for q in qs_reurn:
if q[11] >= 1:
cls = int(q[9])
if not (cls in new_lab):
continue # prevent other classes from being carried along in the report
code = str(int(q[10])).zfill(3)
if det_xywh2.get(code) is None:
det_xywh2[code] = {}
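
In both the OnPush and OffPush hunks above, a detection batch is only reported when at least one row is brand new (dedup-age column == 1), and then only for the classes collected in new_lab — the "prevent other classes from being carried along" comment. A hedged sketch of that filter; the column layout (eight corner coordinates, score, cls, code, age) is taken from the diff:

    import numpy as np

    def select_new_issue_rows(qs_return: np.ndarray) -> np.ndarray:
        """Keep only rows whose class produced at least one brand-new detection (age column == 1)."""
        if qs_return.size == 0:
            return qs_return
        new_classes = set(qs_return[qs_return[:, 11] == 1][:, 9].tolist())  # cls ids seen for the first time
        if not new_classes:
            return qs_return[:0]                                            # nothing new -> report nothing
        mask = np.isin(qs_return[:, 9], list(new_classes))
        return qs_return[mask]

    # usage sketch: two classes, only class 1.0 has a new (age == 1) row
    rows = np.array([
        # x1 y1 x2 y2 x3 y3 x4 y4 score cls  code age
        [0, 0, 1, 0, 1, 1, 0, 1, 0.9, 1.0, 3.0, 1.0],
        [0, 0, 1, 0, 1, 1, 0, 1, 0.8, 2.0, 3.0, 4.0],
    ])
    print(select_new_issue_rows(rows)[:, 9])  # -> [1.]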

+60 -105  concurrency/PushVideoStreamProcess2.py

@@ -61,10 +61,6 @@ class OnPushStreamProcess2(PushStreamProcess2):
frame_score = context["service"]["filter"]["frame_score"]
ex = None
ex_status = True
# image-similarity switch
picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
qs_np_tmp = None
pix_dis = 60
try:
init_log(base_dir, env)
logger.info("开始启动推流进程!requestId:{}", request_id)
@@ -73,6 +69,8 @@ class OnPushStreamProcess2(PushStreamProcess2):
# first element: timestamp, second element: retry count
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
start_time = time()
minID = 0
maxID = 0
while True:
# check whether the push-stream run has timed out
if time() - start_time > service_timeout:
@@ -100,10 +98,9 @@ class OnPushStreamProcess2(PushStreamProcess2):
copy_frame = frame.copy()
# dictionary recording all detected issues
det_xywh, thread_p = {}, []
det_tmp = {}
det_xywh2 = {}
# matrix collection of all issues
qs_np = None
qs_reurn = []

# [model-1 results, model-2 results, model-3 results]
for s_det_list in push_objs:
code, det_result = s_det_list[0], s_det_list[1][i]
@@ -120,17 +117,26 @@ class OnPushStreamProcess2(PushStreamProcess2):
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
is_new = False
if len(qs) == 8:
trackId = qs[7]
elif len(qs) == 5:
trackId = qs[4]
if trackId > minID:
is_new = True
if det_tmp.get(code) is None:
det_tmp[code] = [cls]
else:
if not (cls in det_tmp[code]):
det_tmp[code].append(cls)
qs_tmp = [cls, box, score, label_array, color, is_new]
if trackId > maxID:
maxID = trackId
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
det_xywh[code][cls] = [qs_tmp]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li))
det_xywh[code][cls].append(qs_tmp)
minID = maxID
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -147,41 +153,14 @@ class OnPushStreamProcess2(PushStreamProcess2):
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
if qs_np is not None:
if len(qs_np.shape) == 1:
qs_np = qs_np[np.newaxis,...]
qs_np_id = qs_np.copy()
b = np.ones(qs_np_id.shape[0])
qs_np_id = np.column_stack((qs_np_id,b))
if qs_np_tmp is None:
if picture_similarity:
qs_np_tmp = qs_np_id.copy()
b = np.zeros(qs_np.shape[0])
qs_reurn = np.column_stack((qs_np,b))
else:
qs_reurn = filterBox(qs_np, qs_np_tmp, pix_dis)
if picture_similarity:
qs_np_tmp = np.append(qs_np_tmp,qs_np_id,axis=0)
qs_np_tmp[:, 11] += 1
qs_np_tmp = np.delete(qs_np_tmp, np.where((qs_np_tmp[:, 11] >= 75))[0], axis=0)
for q in qs_reurn:
if q[11] == 0:
cls = int(q[9])
code = str(int(q[10])).zfill(3)
if det_xywh2.get(code) is None:
det_xywh2[code] = {}
cd = det_xywh2[code].get(cls)
score = q[8]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
label_array, color = label_arrays[cls], rainbows[cls]
box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])),
(int(q[4]), int(q[5])), (int(q[6]), int(q[7]))]
if cd is None:
det_xywh2[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh2[code][cls].append([cls, box, score, label_array, color])
if len(det_xywh2) > 0:
put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames, draw_config["font_config"]]))
if det_xywh:
for index, (key, value) in enumerate(det_xywh.items()):
for k in value.keys():
if (key in det_tmp.keys()) and (k in det_tmp[key]):
det_xywh2[key] = {}
det_xywh2[key][k] = det_xywh[key][k]
if len(det_xywh2) > 0:
put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames, draw_config["font_config"]]))

push_p = push_p_result.result(timeout=60)
ai_video_file = write_ai_video_result.result(timeout=60)
@@ -239,14 +218,6 @@ class OffPushStreamProcess2(PushStreamProcess2):
frame_score = context["service"]["filter"]["frame_score"]
ex = None
ex_status = True
high_score_image = {}
# similarity threshold, default 0.65
similarity = context["service"]["filter"]["similarity"]
# image-similarity switch
picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
frame_step = int(context["service"]["filter"]["frame_step"])
qs_np_tmp = None
pix_dis = 60
try:
init_log(base_dir, env)
logger.info("开始启动离线推流进程!requestId:{}", request_id)
@@ -255,6 +226,8 @@ class OffPushStreamProcess2(PushStreamProcess2):
# first element: timestamp, second element: retry count
p_push_status, ai_write_status = [0, 0], [0, 0]
start_time = time()
minID = 0
maxID = 0
while True:
# check whether the push-stream run has timed out
if time() - start_time > service_timeout:
@@ -285,8 +258,7 @@ class OffPushStreamProcess2(PushStreamProcess2):
# dictionary recording all detected issues
det_xywh, thread_p = {}, []
det_xywh2 = {}
qs_np = None
qs_reurn = []
det_tmp = {}
for s_det_list in push_objs:
code, det_result = s_det_list[0], s_det_list[1][i]
if len(det_result) > 0:
@@ -302,17 +274,27 @@ class OffPushStreamProcess2(PushStreamProcess2):
if det_xywh.get(code) is None:
det_xywh[code] = {}
cd = det_xywh[code].get(cls)
is_new = False
if len(qs) == 8:
trackId = qs[7]
elif len(qs) == 5:
trackId = qs[4]
if trackId > minID:
is_new = True
if det_tmp.get(code) is None:
det_tmp[code] = [cls]
else:
if not (cls in det_tmp[code]):
det_tmp[code].append(cls)
qs_tmp = [cls, box, score, label_array, color, is_new]
if trackId > maxID:
maxID = trackId
if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
det_xywh[code][cls] = [qs_tmp]
else:
det_xywh[code][cls].append([cls, box, score, label_array, color])
result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li))
det_xywh[code][cls].append(qs_tmp)
minID = maxID
if logo:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
@@ -327,41 +309,14 @@ class OffPushStreamProcess2(PushStreamProcess2):
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
if qs_np is not None:
if len(qs_np.shape) == 1:
qs_np = qs_np[np.newaxis,...]
qs_np_id = qs_np.copy()
b = np.ones(qs_np_id.shape[0])
qs_np_id = np.column_stack((qs_np_id,b))
if qs_np_tmp is None:
if picture_similarity:
qs_np_tmp = qs_np_id.copy()
b = np.zeros(qs_np.shape[0])
qs_reurn = np.column_stack((qs_np,b))
else:
qs_reurn = filterBox(qs_np, qs_np_tmp, pix_dis)
if picture_similarity:
qs_np_tmp = np.append(qs_np_tmp,qs_np_id,axis=0)
qs_np_tmp[:, 11] += 1
qs_np_tmp = np.delete(qs_np_tmp, np.where((qs_np_tmp[:, 11] >= 75))[0], axis=0)
for q in qs_reurn:
if q[11] == 0:
cls = int(q[9])
code = str(int(q[10])).zfill(3)
if det_xywh2.get(code) is None:
det_xywh2[code] = {}
cd = det_xywh2[code].get(cls)
score = q[8]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
label_array, color = label_arrays[cls], rainbows[cls]
box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])),
(int(q[4]), int(q[5])), (int(q[6]), int(q[7]))]
if cd is None:
det_xywh2[code][cls] = [[cls, box, score, label_array, color]]
else:
det_xywh2[code][cls].append([cls, box, score, label_array, color])
if len(det_xywh2) > 0:
put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames, draw_config["font_config"]]))
if det_xywh:
for index, (key, value) in enumerate(det_xywh.items()):
for k in value.keys():
if (key in det_tmp.keys()) and (k in det_tmp[key]):
det_xywh2[key] = {}
det_xywh2[key][k] = det_xywh[key][k]
if len(det_xywh2) > 0:
put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames, draw_config["font_config"]]))
push_p = push_p_result.result(timeout=60)
ai_video_file = write_ai_video_result.result(timeout=60)
# receive the stop command
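
The rewritten process above drops the pixel-distance filterBox dedup and instead treats a detection as new only when its tracker id exceeds the largest id seen in the previous batch (minID / maxID, collected per code and class in det_tmp); only those codes and classes are forwarded to image_queue, which is the "reported even with no new issues" bug named in the commit message. A simplified, assumption-laden sketch of the idea (the tuple layout is illustrative, the real rows also carry boxes and scores):

    def split_new_detections(detections, min_id):
        """detections: iterable of (code, cls, track_id) tuples.
        Returns (codes_and_classes_with_new_tracks, new_max_id)."""
        new_keys = set()
        max_id = min_id
        for code, cls, track_id in detections:
            if track_id > min_id:          # unseen tracker id -> genuinely new problem
                new_keys.add((code, cls))
            if track_id > max_id:
                max_id = track_id
        return new_keys, max_id

    # usage sketch over two frame batches
    min_id = 0
    batch = [("016", 3, 5), ("016", 0, 7)]
    new_keys, min_id = split_new_detections(batch, min_id)   # both tracks are new, min_id -> 7
    print(new_keys)                                          # {('016', 3), ('016', 0)}
    new_keys, min_id = split_new_detections(batch, min_id)   # same tracks again -> nothing to report
    print(new_keys)                                          # set()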

BIN  concurrency/__pycache__/CommonThread.cpython-38.pyc
BIN  concurrency/__pycache__/FeedbackThread.cpython-38.pyc
BIN  concurrency/__pycache__/FileUploadThread.cpython-38.pyc
BIN  concurrency/__pycache__/HeartbeatThread.cpython-38.pyc
BIN  concurrency/__pycache__/IntelligentRecognitionProcess.cpython-38.pyc
BIN  concurrency/__pycache__/IntelligentRecognitionProcess2.cpython-38.pyc
BIN  concurrency/__pycache__/Pull2PushStreamProcess.cpython-38.pyc
BIN  concurrency/__pycache__/Pull2PushStreamThread.cpython-38.pyc
BIN  concurrency/__pycache__/PullStreamThread.cpython-38.pyc
BIN  concurrency/__pycache__/PullVideoStreamProcess.cpython-38.pyc
BIN  concurrency/__pycache__/PullVideoStreamProcess2.cpython-38.pyc
BIN  concurrency/__pycache__/PushVideoStreamProcess.cpython-38.pyc
BIN  concurrency/__pycache__/PushVideoStreamProcess2.cpython-38.pyc
BIN  concurrency/__pycache__/RecordingHeartbeatThread.cpython-38.pyc
BIN  concurrency/__pycache__/__init__.cpython-38.pyc
BIN  entity/__pycache__/FeedBack.cpython-38.pyc
BIN  entity/__pycache__/__init__.cpython-38.pyc


+150 -25  enums/ModelTypeEnum.py

@@ -12,6 +12,12 @@ from segutils.trafficUtils import tracfficAccidentMixFunction
from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.illParkingUtils import illParking_postprocess
from stdc import stdcModel
from yolov5 import yolov5Model
from DMPRUtils.jointUtil import dmpr_yolo_stdc
from AI import default_mix
from ocr import ocrModel
from utilsK.channel2postUtils import channel2_post_process

'''
Parameter description
@@ -82,7 +88,7 @@ class ModelType(Enum):

TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: {
'device': str(device),
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "事故"],
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "影子", "事故"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 3,
@@ -102,7 +108,7 @@ class ModelType(Enum):
'roadVehicleAngle': 15,
'speedRoadVehicleAngleMax': 75,
'roundness': 1.0,
'cls': 9,
'cls': 10,
'vehicleFactor': 0.1,
'confThres': 0.25,
'roadIou': 0.6,
@@ -116,7 +122,7 @@ class ModelType(Enum):
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"classes": 10,
"rainbows": COLOR
},
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
@@ -174,7 +180,7 @@ class ModelType(Enum):
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["烟雾", "火焰"],
'labelnames': ["火焰", "烟雾"],
'seg_nclass': 2,  # number of segmentation classes, default 2
'segRegionCnt': 0,
'trtFlag_det': True,
@@ -322,33 +328,45 @@ class ModelType(Enum):
})

CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["车辆", "垃圾", "商贩", "违停"],
'seg_nclass': 4, # 分割模型类别数目,默认2类
'segRegionCnt': 2,
'trtFlag_det': True,
'trtFlag_seg': True,
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName,
'segPar': {
'depth_factor': 32,
'NUM_FEATURE_MAP_CHANNEL': 6,
'dmpr_thresh': 0.3,
'dmprimg_size': 640,
'mixFunction': {
'function': dmpr_yolo,
'pars': {'carCls': 0, 'illCls': 3, 'scaleRatio': 0.5, 'border':80}
}
},
'postProcess':{
'function':dmpr_yolo_stdc,
'pars':{'carCls':0 ,'illCls':3,'scaleRatio':0.5,'border':80}
},
'models':[
{
'weight':'../AIlib2/weights/conf/cityMangement3/yolov5.pt',
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.8,"1":0.4,"2":0.5,"3":0.5 } }
},
{
'weight':'../AIlib2/weights/conf/cityMangement3/dmpr.pth',
'par':{
'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.1, 'dmprimg_size':640,
'name':'dmpr'
},
'model':DMPRModel,
'name':'dmpr'
},
{
'weight':'../AIlib2/weights/conf/cityMangement3/stdc_360X640.pth',
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},  ### segmentation model preprocessing parameters
'model':stdcModel,
'name':'stdc'
}
],
'postFile': {
"name": "post_process",
"conf_thres": 0.8,
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
"score_byClass": {'0':0.8, '1':0.5, '2':0.5},
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],  ### controls which detection classes are displayed and reported
'segRegionCnt':2,  ### number of contour lines to keep from the segmentation result
"pixScale": 1.2,
})

DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {
@@ -459,7 +477,7 @@ class ModelType(Enum):
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"conf_thres": 0.8,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
@@ -468,6 +486,113 @@ class ModelType(Enum):
'Segweights': None
})

POTHOLE_MODEL = ("23", "023", "坑槽检测模型", 'pothole', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["坑槽"],
'seg_nclass': 2,  # number of segmentation classes, default 2
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/pothole/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None,
})

CHANNEL2_MODEL = ("24", "024", "船只综合检测模型", 'channel2', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["国旗", "浮标", "船名", "船只","未挂国旗船只"],
'segRegionCnt': 0,
'postProcess':{'function':channel2_post_process,'name':'channel2','pars':{
'objs':[2],
'wRation':1/6.0,
'hRation':1/6.0,
'smallId':0,
'bigId':3,
'newId':4,
'recScale':1.2}},
'models':[
{
'weight':'../AIlib2/weights/conf/channel2/yolov5.pt',
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.1,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.7,"1":0.7,"2":0.8,"3":0.6} }
},
{
# 'weight' : '../AIlib2/weights/ocr2/crnn_ch_4090_fp16_192X32.engine',
'weight' : '../AIlib2/weights/conf/ocr2/crnn_ch.pth',
'name':'ocr',
'model':ocrModel,
'par':{
'char_file':'../AIlib2/weights/conf/ocr2/benchmark.txt',
'mode':'ch',
'nc':3,
'imgH':32,
'imgW':192,
'hidden':256,
'mean':[0.5,0.5,0.5],
'std':[0.5,0.5,0.5],
'dynamic':False,
},
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3]],
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None,
})
RIVERT_MODEL = ("25", "025", "河道检测模型(T)", 'riverT', lambda device, gpuName: {
'device': device,
'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只",
"蓝藻"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 2,
'segRegionCnt': 1,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [1, 3, 4, 7],
'riverIou': 0.1
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.3,
"ovlap_thres_crossCategory": 0.65,
"classes": 5,
"rainbows": COLOR
},
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
'Detweights': "../AIlib2/weights/riverT/yolov5_%s_fp16.engine" % gpuName,
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3]
'Segweights': '../AIlib2/weights/riverT/stdc_360X640_%s_fp16.engine' % gpuName
})

@staticmethod
def checkCode(code):
for model in ModelType:
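
CITY_MANGEMENT_MODEL, CHANNEL2_MODEL and RIVERT_MODEL above switch from a single Detweights / Segweights pair to a 'models' list of sub-model configs plus a joint 'postProcess' entry. The loader change in util/ModelUtils.py further down consumes that shape roughly as sketched below; the class here is a placeholder for yolov5Model / DMPRModel / stdcModel, only the dict keys are taken from the enum:

    class FakeDetector:
        """Placeholder for the project's model wrappers (yolov5Model, DMPRModel, stdcModel)."""
        def __init__(self, weights, par):
            self.weights, self.par = weights, par
        def __call__(self, frame):
            return []   # a real wrapper would return detections or masks here

    def build_pipeline(conf):
        """Instantiate every sub-model listed in conf['models'] and keep the joint post-process."""
        model_list = [entry["model"](weights=entry["weight"], par=entry["par"])
                      for entry in conf["models"]]
        post = conf["postProcess"]
        return model_list, post["function"], post.get("pars", {})

    # usage sketch with a made-up two-stage config
    conf = {
        "models": [
            {"name": "yolov5", "model": FakeDetector, "weight": "det.engine", "par": {"conf_thres": 0.25}},
            {"name": "stdc",   "model": FakeDetector, "weight": "seg.pth",    "par": {"seg_nclass": 2}},
        ],
        "postProcess": {"function": lambda *outputs, **pars: outputs, "pars": {}},
    }
    models, post_fn, post_pars = build_pipeline(conf)
    print(len(models), post_fn(1, 2, **post_pars))   # 2 (1, 2)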

+337 -301  enums/ModelTypeEnum2.py

@@ -5,13 +5,17 @@ from common.Constant import COLOR

sys.path.extend(['..', '../AIlib2'])
from segutils.segmodel import SegModel
from utilsK.queRiver import riverDetSegMixProcess
from segutils.trafficUtils import tracfficAccidentMixFunction
from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.queRiver import riverDetSegMixProcess_N
from segutils.trafficUtils import tracfficAccidentMixFunction_N
from utilsK.drownUtils import mixDrowing_water_postprocess_N
from utilsK.noParkingUtils import mixNoParking_road_postprocess_N
from utilsK.illParkingUtils import illParking_postprocess
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo
from yolov5 import yolov5Model
from stdc import stdcModel
from AI import default_mix
from DMPRUtils.jointUtil import dmpr_yolo_stdc

'''
Parameter description
@@ -27,35 +31,16 @@ class ModelType2(Enum):
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', lambda device, gpuName: {
'device': device,
'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/river/yolov5_%s_fp16.engine" % gpuName,
'detModelpara': [],
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225), 'numpy': False,
'RGB_convert_first': True, # 分割模型预处理参数
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [5, 6, 7],
'riverIou': 0.1
}
}
},
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7] ],  ### controls which detection classes are displayed and reported
'trackPar': {
'sort_max_age': 2, # max consecutive misses before a track is dropped; after that a reappearing object counts as new
'sort_min_hits': 3, # a target must be seen this many consecutive times before it becomes a confirmed track
'sort_iou_thresh': 0.2, # minimum overlap/confidence used by the tracker association
'det_cnt': 5, # run detection/tracking every N frames, default 10
'windowsize': 25, # trajectory smoothing window, must be odd, default 29; per-frame positions are smoothed into a track
'det_cnt': 10, # run detection/tracking every N frames, default 10
'windowsize': 29, # trajectory smoothing window, must be odd, default 29; per-frame positions are smoothed into a track
'patchCnt': 100, # number of frames fed per batch, should not be fewer than 100
},
'Segweights': '../AIlib2/weights/river/stdc_360X640_%s_fp16.engine' % gpuName,
'postProcess':{'function':riverDetSegMixProcess_N,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}}, # function that fuses segmentation and detection results
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -71,27 +56,63 @@ class ModelType2(Enum):
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
},
'models':
[
{
'weight':"../AIlib2/weights/river/yolov5_%s_fp16.engine"% gpuName,  ### detection model path
'name':'yolov5',
'model':yolov5Model,
'par':{
'half':True,
'device':'cuda:0' ,
'conf_thres':0.25,
'iou_thres':0.45,
'allowedList':[0,1,2,3],
'segRegionCnt':1,
'trtFlag_det':False,
'trtFlag_seg':False,
"score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
},
{
'weight':'../AIlib2/weights/conf/river/stdc_360X640.pth',
'par':{
'modelSize':(640,360),
'mean':(0.485, 0.456, 0.406),
'std' :(0.229, 0.224, 0.225),
'numpy':False,
'RGB_convert_first':True,
'seg_nclass':2},  ### segmentation model preprocessing parameters
'model':stdcModel,
'name':'stdc'
}
],
})

FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: {
'device': device,
'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/forest2/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'models':
[
{
'weight':"../AIlib2/weights/forest2/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,
'device':'cuda:0' ,
'conf_thres':0.25,
'iou_thres':0.45,
'allowedList':[0,1,2,3],
'segRegionCnt':1,
'trtFlag_det':False,
'trtFlag_seg':False,
"score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 }
},
}
],
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -113,47 +134,58 @@ class ModelType2(Enum):
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: {
'device': device,
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "事故"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/highWay2/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 3,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': tracfficAccidentMixFunction,
'pars': {
'RoadArea': 16000,
'modelSize': (640, 360),
'vehicleArea': 10,
'roadVehicleAngle': 15,
'speedRoadVehicleAngleMax': 75,
'roundness': 1.0,
'cls': 9,
'vehicleFactor': 0.1,
'confThres': 0.25,
'roadIou': 0.6,
'radius': 50,
'vehicleFlag': False,
'distanceFlag': False
}
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':5,'windowsize':29,'patchCnt':100},
'postProcess':{
'function':tracfficAccidentMixFunction_N,
'pars':{
'RoadArea': 16000,
'vehicleArea': 10,
'roadVehicleAngle': 15,
'speedRoadVehicleAngleMax': 75,
'radius': 50 ,
'roundness': 1.0,
'cls': 9,
'vehicleFactor': 0.1,
'cls':9,
'confThres':0.25,
'roadIou':0.6,
'vehicleFlag':False,
'distanceFlag': False,
'modelSize':(640,360),
}
},
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'Segweights': '../AIlib2/weights/highWay2/stdc_360X640_%s_fp16.engine' % gpuName,
'models':
[
{
'weight':"../AIlib2/weights/highWay2/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{
'half':True,
'device':'cuda:0' ,
'conf_thres':0.25,
'iou_thres':0.45,
'allowedList':[0,1,2,3],
'segRegionCnt':1,
'trtFlag_det':False,
'trtFlag_seg':False,
"score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
},
{
'weight':'../AIlib2/weights/conf/highWay2/stdc_360X640.pth',
'par':{
'modelSize':(640,360),
'mean':(0.485, 0.456, 0.406),
'std' :(0.229, 0.224, 0.225),
'predResize':True,
'numpy':False,
'RGB_convert_first':True,
'seg_nclass':3},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -179,21 +211,26 @@ class ModelType2(Enum):
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/vehicle/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/vehicle/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,
'device':'cuda:0' ,
'conf_thres':0.25,
'iou_thres':0.45,
'allowedList':[0,1,2,3],
'segRegionCnt':1,
'trtFlag_det':False,
'trtFlag_seg':False,
"score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -215,21 +252,19 @@ class ModelType2(Enum):
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', lambda device, gpuName: {
'device': device,
'labelnames': ["行人"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -250,21 +285,19 @@ class ModelType2(Enum):
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: {
'device': device,
'labelnames': ["烟雾", "火焰"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/smogfire/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/smogfire/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
#'weight':'../AIlib2/weights/conf/%s/yolov5.pt'%(opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -286,21 +319,18 @@ class ModelType2(Enum):
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: {
'device': device,
'labelnames': ["钓鱼", "游泳"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -322,21 +352,18 @@ class ModelType2(Enum):
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', lambda device, gpuName: {
'device': device,
'labelnames': ["违法种植"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -381,8 +408,8 @@ class ModelType2(Enum):
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'det_cnt': 10, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 29, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'device': "cuda:%s" % device,
@@ -412,21 +439,19 @@ class ModelType2(Enum):
CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', lambda device, gpuName: {
'device': device,
'labelnames': ["人"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
#'weight':'../AIlib2/weights/conf/%s/yolov5.pt'%(opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -449,34 +474,25 @@ class ModelType2(Enum):
'device': device,
'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只",
"蓝藻"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/river2/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225), 'numpy': False,
'RGB_convert_first': True, # 分割模型预处理参数
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [1, 3, 4, 7],
'riverIou': 0.1
}
}
},
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':riverDetSegMixProcess_N,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}}, # function that fuses segmentation and detection results
'models':
[
{
'weight':"../AIlib2/weights/river2/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
},
{
'weight':'../AIlib2/weights/conf/river2/stdc_360X640.pth',
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -499,34 +515,41 @@ class ModelType2(Enum):
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "垃圾", "商贩", "违停"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'depth_factor': 32,
'NUM_FEATURE_MAP_CHANNEL': 6,
'dmpr_thresh': 0.3,
'dmprimg_size': 640,
'mixFunction': {
'function': dmpr_yolo,
'pars': {'carCls': 0, 'illCls': 3, 'scaleRatio': 0.5, 'border':80}
}
},
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':5,'windowsize':29,'patchCnt':100},
'postProcess':{
'function':dmpr_yolo_stdc,
'pars':{'carCls':0 ,'illCls':3,'scaleRatio':0.5,'border':80}
},
'models':[
{
'weight':"../AIlib2/weights/cityMangement3/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.8,"1":0.5,"2":0.5,"3":0.5 } }
},
{
'weight':"../AIlib2/weights/cityMangement3/dmpr_%s.engine"% gpuName,###DMPR模型路径
'par':{
'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.3, 'dmprimg_size':640,
'name':'dmpr'
},
'model':DMPRModel,
'name':'dmpr'
},
{
'weight':"../AIlib2/weights/cityMangement3/stdc_360X640_%s_fp16.engine"% gpuName,###分割模型路径
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.8,
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
@@ -545,35 +568,26 @@ class ModelType2(Enum):
DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {
'device': device,
'labelnames': ["人头", "人", "船只"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/drowning/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': mixDrowing_water_postprocess,
'pars': {
'modelSize': (640, 360),
}
}
},
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'Segweights': '../AIlib2/weights/drowning/stdc_360X640_%s_fp16.engine' % gpuName,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':mixDrowing_water_postprocess_N,
'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/drowning/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
},
{
'weight':'../AIlib2/weights/conf/drowning/stdc_360X640.pth',
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -596,34 +610,27 @@ class ModelType2(Enum):
"18", "018", "城市违章模型", 'noParking', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "违停"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/noParking/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': mixNoParking_road_postprocess,
'pars': {'modelSize': (640, 360), 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5,
'RoadArea': 16000, 'fitOrder':2}
}
},
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
},
'Segweights': '../AIlib2/weights/noParking/stdc_360X640_%s_fp16.engine' % gpuName,
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':mixNoParking_road_postprocess_N,
'pars': { 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5 ,'RoadArea': 16000,'fitOrder':2, 'modelSize':(640,360)}
} ,
'models':
[
{
'weight':"../AIlib2/weights/noParking/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
},
{
'weight':'../AIlib2/weights/conf/noParking/stdc_360X640.pth',
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':4},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -646,22 +653,51 @@ class ModelType2(Enum):
CITYROAD_MODEL = ("20", "020", "城市公路模型", 'cityRoad', lambda device, gpuName: {
'device': device,
'labelnames': ["护栏", "交通标志", "非交通标志", "施工", "施工"],
'half': True,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/cityRoad/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。
'sort_iou_thresh': 0.2, # 检测最小的置信度。
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/cityRoad/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.8,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.8,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'txtFontSize': 40,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

POTHOLE_MODEL = ("23", "023", "坑槽检测模型", 'pothole', lambda device, gpuName: { # currently folded into other models; not used on its own
'device': device,
'labelnames': ["坑槽"],
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':3,'windowsize':29,'patchCnt':100},
'postProcess':{'function':default_mix,'pars':{ }},
'models':
[
{
'weight':"../AIlib2/weights/pothole/yolov5_%s_fp16.engine"% gpuName,###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3}},
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0]],###控制哪些检测类别显示、输出
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
@@ -677,7 +713,7 @@ class ModelType2(Enum):
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
},
})
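
Because every ModelType2 entry now shares the same 'models' / 'postProcess' / 'trackPar' / 'postFile' shape, a small key check catches entries that missed the migration. A hedged sketch; the required-key list is an assumption based on the hunks above:

    REQUIRED_KEYS = ("labelnames", "models", "postProcess", "trackPar", "postFile")

    def missing_keys(conf: dict):
        """Return the required keys a built model config lacks."""
        return [k for k in REQUIRED_KEYS if k not in conf]

    # usage sketch with an inline config in the same shape the enum lambdas build
    conf = {
        "labelnames": ["车辆"],
        "models": [],
        "postProcess": {"function": None, "pars": {}},
        "trackPar": {"det_cnt": 10, "windowsize": 29},
        "postFile": {"conf_thres": 0.25},
    }
    print(missing_keys(conf))   # []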



BIN  enums/__pycache__/AnalysisStatusEnum.cpython-38.pyc
BIN  enums/__pycache__/AnalysisTypeEnum.cpython-38.pyc
BIN  enums/__pycache__/BaiduSdkEnum.cpython-38.pyc
BIN  enums/__pycache__/ExceptionEnum.cpython-38.pyc
BIN  enums/__pycache__/ModelTypeEnum.cpython-38.pyc
BIN  enums/__pycache__/ModelTypeEnum2.cpython-38.pyc
BIN  enums/__pycache__/RecordingStatusEnum.cpython-38.pyc
BIN  enums/__pycache__/StatusEnum.cpython-38.pyc
BIN  enums/__pycache__/__init__.cpython-38.pyc
BIN  exception/__pycache__/CustomerException.cpython-38.pyc
BIN  exception/__pycache__/__init__.cpython-38.pyc


+1 -1  service/Dispatcher.py

@@ -218,7 +218,7 @@ class DispatcherService:
return
model_type = self.__context["service"]["model"]["model_type"]
codes = [model.get("code") for model in msg["models"] if model.get("code")]
if ModelMethodTypeEnum.NORMAL.value == model_type or ModelType.ILLPARKING_MODEL.value[1] in codes:
if ModelMethodTypeEnum.NORMAL.value == model_type:
first = OfflineIntelligentRecognitionProcess(self.__fbQueue, msg, analysisType, self.__context)
else:
first = OfflineIntelligentRecognitionProcess2(self.__fbQueue, msg, analysisType, self.__context)
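
The change above drops the ILLPARKING special case, so the branch now depends only on model_type. A minimal sketch of the resulting routing, with the NORMAL value "1" and the returned class names as placeholders (the real method constructs the process objects):

def pick_process(model_type, msg, normal_value="1"):
    codes = [m.get("code") for m in msg["models"] if m.get("code")]
    if model_type == normal_value:      # a model code alone no longer forces this branch
        return "OfflineIntelligentRecognitionProcess", codes
    return "OfflineIntelligentRecognitionProcess2", codes

print(pick_process("2", {"models": [{"code": "020"}, {"code": None}]}))
# -> ('OfflineIntelligentRecognitionProcess2', ['020'])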

BIN  service/__pycache__/Dispatcher.cpython-38.pyc
BIN  service/__pycache__/__init__.cpython-38.pyc


+99 -39  util/ModelUtils.py

@@ -13,11 +13,11 @@ from enums.ExceptionEnum import ExceptionType
from enums.ModelTypeEnum import ModelType, BAIDU_MODEL_TARGET_CONFIG
from exception.CustomerException import ServiceException
from util.ImgBaiduSdk import AipBodyAnalysisClient, AipImageClassifyClient
from util.PlotsUtils import get_label_arrays
from util.PlotsUtils import get_label_arrays, get_label_array_dict
from util.TorchUtils import select_device

sys.path.extend(['..', '../AIlib2'])
from AI import AI_process, AI_process_forest, get_postProcess_para, ocr_process
from AI import AI_process, AI_process_forest, get_postProcess_para, ocr_process, AI_process_N, AI_process_C
from stdc import stdcModel
from segutils.segmodel import SegModel
from models.experimental import attempt_load
@@ -51,7 +51,7 @@ class OneModel:
par['segPar']['seg_nclass'] = par['seg_nclass']
Segweights = par['Segweights']
if Segweights:
if modeType.value[3] == 'cityMangement2':
if modeType.value[3] == 'cityMangement3':
segmodel = DMPRModel(weights=Segweights, par=par['segPar'])
else:
segmodel = stdcModel(weights=Segweights, par=par['segPar'])
@@ -82,7 +82,6 @@ class OneModel:
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1])

# dedicated to the city-management model: takes an extra score_byClass parameter
class cityManagementModel:
__slots__ = "model_conf"

@@ -91,49 +90,35 @@ class cityManagementModel:
logger.info("########################加载{}########################, requestId:{}", modeType.value[2],
requestId)
par = modeType.value[4](str(device), gpu_name)
mode, postPar, segPar = par.get('mode', 'others'), par.get('postPar'), par.get('segPar')
postProcess = par['postProcess']
names = par['labelnames']
postFile = par['postFile']
rainbows = postFile["rainbows"]
new_device = select_device(par.get('device'))
half = new_device.type != 'cpu'
Detweights = par['Detweights']
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
model = runtime.deserialize_cuda_engine(f.read())
par['segPar']['seg_nclass'] = par['seg_nclass']
Segweights = par['Segweights']
if Segweights:
if modeType.value[3] == 'cityMangement2':
segmodel = DMPRModel(weights=Segweights, par=par['segPar'])
else:
segmodel = stdcModel(weights=Segweights, par=par['segPar'])
else:
segmodel = None
objectPar = {
'half': half,
'device': new_device,
'conf_thres': postFile["conf_thres"],
'ovlap_thres_crossCategory': postFile.get("ovlap_thres_crossCategory"),
'iou_thres': postFile["iou_thres"],
'allowedList': [],
'segRegionCnt': par['segRegionCnt'],
'trtFlag_det': par['trtFlag_det'],
'trtFlag_seg': par['trtFlag_seg'],
'score_byClass': par['score_byClass']
}
modelList=[ modelPar['model'](weights=modelPar['weight'],par=modelPar['par']) for modelPar in par['models'] ]
model_param = {
"model": model,
"segmodel": segmodel,
"objectPar": objectPar,
"segPar": segPar,
"mode": mode,
"postPar": postPar
"modelList": modelList,
"postProcess": postProcess,
}
self.model_conf = (modeType, model_param, allowedList, names, rainbows)
except Exception:
logger.error("模型加载异常:{}, requestId:{}", format_exc(), requestId)
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1])

def detSeg_demo2(args):
model_conf, frame, request_id = args
modelList, postProcess = model_conf[1]['modelList'], model_conf[1]['postProcess']
try:
return AI_process_N([frame], modelList, postProcess)
except ServiceException as s:
raise s
except Exception:
# self.num += 1
# cv2.imwrite('/home/th/tuo_heng/dev/img%s.jpg' % str(self.num), frame)
logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id)
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0],
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1])
def model_process(args):
model_conf, frame, request_id = args
@@ -216,7 +201,47 @@ def forest_process(args):
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0],
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1])

class MultiModel:
__slots__ = "model_conf"

def __init__(self, device1, allowedList=None, requestId=None, modeType=None, gpu_name=None, base_dir=None,
env=None):
s = time.time()
try:
logger.info("########################加载{}########################, requestId:{}", modeType.value[2],
requestId)
par = modeType.value[4](str(device1), gpu_name)
postProcess = par['postProcess']
names = par['labelnames']
postFile = par['postFile']
rainbows = postFile["rainbows"]
modelList=[ modelPar['model'](weights=modelPar['weight'],par=modelPar['par']) for modelPar in par['models'] ]
model_param = {
"modelList": modelList,
"postProcess": postProcess,
}
self.model_conf = (modeType, model_param, allowedList, names, rainbows)
except Exception:
logger.error("模型加载异常:{}, requestId:{}", format_exc(), requestId)
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
logger.info("模型初始化时间:{}, requestId:{}", time.time() - s, requestId)

def channel2_process(args):
model_conf, frame, request_id = args
modelList, postProcess = model_conf[1]['modelList'], model_conf[1]['postProcess']
try:
start = time.time()
result = [[None, None, AI_process_C([frame], modelList, postProcess)[0]]]  # wrapped this way so the return value matches the unified downstream interface
# print("AI_process_C use time = {}".format(time.time()-start))
return result
except ServiceException as s:
raise s
except Exception:
logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id)
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0],
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1])
def get_label_arraylist(*args):
width, height, names, rainbows = args
# line = int(round(0.002 * (height + width) / 2) + 1)
@@ -396,7 +421,22 @@ def one_label(width, height, model_conf):
model_param['label_arraylist'] = label_arraylist
model_param['font_config'] = font_config


def dynamics_label(width, height, model_conf):
# modeType, model_param, allowedList, names, rainbows = model_conf
names = model_conf[3]
rainbows = model_conf[4]
model_param = model_conf[1]
digitFont, label_arraylist, font_config = get_label_arraylist(width, height, names, rainbows)
line = max(1, int(round(width / 1920 * 3)))
label = ' 0.95'
tf = max(line - 1, 1)
fontScale = line * 0.33
_, text_height = cv2.getTextSize(label, 0, fontScale=fontScale, thickness=tf)[0]
label_dict = get_label_array_dict(rainbows, fontSize=text_height, fontPath=FONT_PATH)
model_param['digitFont'] = digitFont
model_param['label_arraylist'] = label_arraylist
model_param['font_config'] = font_config
model_param['label_dict'] = label_dict
def baidu_label(width, height, model_conf):
# modeType, aipImageClassifyClient, aipBodyAnalysisClient, allowedList, rainbows,
# vehicle_names, person_names, requestId
@@ -502,7 +542,7 @@ MODEL_CONFIG = {
lambda x, y, r, t, z, h: cityManagementModel(x, y, r, ModelType.CITY_MANGEMENT_MODEL, t, z, h),
ModelType.CITY_MANGEMENT_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: model_process(x)
lambda x: detSeg_demo2(x)
),
# 人员落水模型
ModelType.DROWING_MODEL.value[1]: (
@@ -531,4 +571,24 @@ MODEL_CONFIG = {
ModelType.CITYROAD_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: forest_process(x)),
# 加载坑槽模型
ModelType.POTHOLE_MODEL.value[1]: (
lambda x, y, r, t, z, h: TwoModel(x, y, r, ModelType.POTHOLE_MODEL, t, z, h),
ModelType.POTHOLE_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: forest_process(x)
),
# 加载船只综合检测模型
ModelType.CHANNEL2_MODEL.value[1]: (
lambda x, y, r, t, z, h: MultiModel(x, y, r, ModelType.CHANNEL2_MODEL, t, z, h),
ModelType.CHANNEL2_MODEL,
lambda x, y, z: dynamics_label(x, y, z),
lambda x: channel2_process(x)
),
# 河道检测模型
ModelType.RIVERT_MODEL.value[1]: (
lambda x, y, r, t, z, h: OneModel(x, y, r, ModelType.RIVERT_MODEL, t, z, h),
ModelType.RIVERT_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: model_process(x)),
}
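
Each MODEL_CONFIG entry above is a four-tuple: a constructor lambda, the enum member, a label-preparation function and a frame-processing function. A minimal sketch of how a caller might consume it, assuming the project root is on sys.path; the positional arguments (device, allowedList, requestId, gpu_name, base_dir, env) follow the lambdas' order and the literal values are placeholders:

from util.ModelUtils import MODEL_CONFIG
from enums.ModelTypeEnum import ModelType

ctor, mode_type, label_fn, process_fn = MODEL_CONFIG[ModelType.CHANNEL2_MODEL.value[1]]
# ctor("0", [], "req-1", "2080Ti", ".", "dev") would build a MultiModel; its frames are
# then analysed via process_fn((model_conf, frame, "req-1")). The call is only indicated
# here because it needs the real TensorRT engine files on disk.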

+26 -32  util/ModelUtils2.py

@@ -19,7 +19,7 @@ import torch
import tensorrt as trt

sys.path.extend(['..', '../AIlib2'])
from AI import AI_process, get_postProcess_para, get_postProcess_para_dic, AI_det_track, AI_det_track_batch
from AI import AI_process, get_postProcess_para, get_postProcess_para_dic, AI_det_track, AI_det_track_batch, AI_det_track_batch_N
from stdc import stdcModel
from utilsK.jkmUtils import pre_process, post_process, get_return_data
from obbUtils.shipUtils import OBB_infer, OBB_tracker, draw_obb, OBB_tracker_batch
@@ -39,39 +39,23 @@ class Model:
logger.info("########################加载{}########################, requestId:{}", modeType.value[2],
requestId)
par = modeType.value[4](str(device), gpu_name)
new_device = select_device(par['device'])
Detweights = par['Detweights']
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
model = runtime.deserialize_cuda_engine(f.read())
Segweights = par['Segweights']
if Segweights:
if modeType.value[3] == 'cityMangement2':
segmodel = DMPRModel(weights=par['Segweights'], par = par['segPar'])
else:
segmodel = stdcModel(weights=par['Segweights'], par = par['segPar'])
else:
segmodel = None
trackPar = par['trackPar']
sort_tracker = Sort(max_age=trackPar['sort_max_age'], min_hits=trackPar['sort_min_hits'],
iou_threshold=trackPar['sort_iou_thresh'])
names, segPar = par['labelnames'], par['segPar']
names = par['labelnames']
detPostPar = par['postFile']
rainbows = detPostPar["rainbows"]
modelPar = {'det_Model': model, 'seg_Model': segmodel}
processPar = {
'half': par['half'],
'device': new_device,
'conf_thres': detPostPar["conf_thres"],
'iou_thres': detPostPar["iou_thres"],
'trtFlag_det': par['trtFlag_det'],
'iou2nd': detPostPar.get("ovlap_thres_crossCategory")
}
# step 1: load the models
modelList=[ modelPar['model'](weights=modelPar['weight'],par=modelPar['par']) for modelPar in par['models'] ]
# step 2: prepare the tracking parameters
trackPar=par['trackPar']
sort_tracker = Sort(max_age=trackPar['sort_max_age'],
min_hits=trackPar['sort_min_hits'],
iou_threshold=trackPar['sort_iou_thresh'])
postProcess = par['postProcess']
model_param = {
"modelPar": modelPar,
"processPar": processPar,
"modelList": modelList,
"postProcess": postProcess,
"sort_tracker": sort_tracker,
"trackPar": trackPar,
"segPar": segPar
}
self.model_conf = (modeType, model_param, allowedList, names, rainbows)
except Exception:
@@ -79,7 +63,6 @@ class Model:
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1])


def get_label_arraylist(*args):
width, height, names, rainbows = args
# line = int(round(0.002 * (height + width) / 2) + 1)
@@ -112,8 +95,11 @@ def model_process(args):
# (modeType, model_param, allowedList, names, rainbows)
imgarray_list, iframe_list, model_param, request_id = args
try:
return AI_det_track_batch(imgarray_list, iframe_list, model_param['modelPar'], model_param['processPar'],
model_param['sort_tracker'], model_param['trackPar'], model_param['segPar'])
return AI_det_track_batch_N(imgarray_list, iframe_list,
model_param['modelList'],
model_param['postProcess'],
model_param['sort_tracker'],
model_param['trackPar'])
except ServiceException as s:
raise s
except Exception:
@@ -444,5 +430,13 @@ MODEL_CONFIG2 = {
lambda x, y, r, t, z, h: Model(x, y, r, ModelType2.CITYROAD_MODEL, t, z, h),
ModelType2.CITYROAD_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: model_process(x))
lambda x: model_process(x)
),
# 加载坑槽模型
ModelType2.POTHOLE_MODEL.value[1]: (
lambda x, y, r, t, z, h: Model(x, y, r, ModelType2.POTHOLE_MODEL, t, z, h),
ModelType2.POTHOLE_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: model_process(x)
),
}
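
In the reworked Model class above, the trackPar block of a config is split: three fields seed the SORT tracker and the rest travel alongside it into AI_det_track_batch_N. A minimal sketch mirroring that "step 2" code; Sort is the tracker class already imported by util/ModelUtils2.py (its import path is not shown in this diff), and the trackPar values are examples matching the configs:

trackPar = {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2,
            'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100}
sort_tracker = Sort(max_age=trackPar['sort_max_age'],
                    min_hits=trackPar['sort_min_hits'],
                    iou_threshold=trackPar['sort_iou_thresh'])
# det_cnt, windowsize and patchCnt are not Sort() arguments; they are handed to
# AI_det_track_batch_N together with the tracker, as model_process() shows.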

+83 -2  util/PlotsUtils.py

@@ -1,10 +1,16 @@
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import unicodedata
FONT_PATH = "../AIlib2/conf/platech.ttf"

zhFont = ImageFont.truetype(FONT_PATH, 20, encoding="utf-8")

def get_label_array(color=None, label=None, font=None, fontSize=40):
x, y, width, height = font.getbbox(label)
def get_label_array(color=None, label=None, font=None, fontSize=40, unify=False):
if unify:
x, y, width, height = font.getbbox("标")  # measure one reference glyph so every label array gets the same size
else:
x, y, width, height = font.getbbox(label)
text_image = np.zeros((height, width, 3), dtype=np.uint8)
text_image = Image.fromarray(text_image)
draw = ImageDraw.Draw(text_image)
@@ -24,6 +30,25 @@ def get_label_arrays(labelNames, colors, fontSize=40, fontPath="platech.ttf"):
enumerate(labelNames)]
return label_arraylist

def get_label_array_dict(colors, fontSize=40, fontPath="platech.ttf"):
font = ImageFont.truetype(fontPath, fontSize, encoding='utf-8')
all_chinese_characters = []
for char in range(0x4E00, 0x9FFF + 1):  # CJK unified ideographs
chinese_character = chr(char)
if unicodedata.category(chinese_character) == 'Lo':
all_chinese_characters.append(chinese_character)
for char in range(0x0041, 0x005B):  # uppercase Latin letters
all_chinese_characters.append(chr(char))
for char in range(0x0061, 0x007B):  # lowercase Latin letters
all_chinese_characters.append(chr(char))
for char in range(0x0030, 0x003A):  # digits
all_chinese_characters.append(chr(char))
zh_dict = {}
for code in all_chinese_characters:
arr = get_label_array(colors[2], code, font, fontSize, unify=True)
zh_dict[code] = arr
return zh_dict


def xywh2xyxy(box):
if not isinstance(box[0], (list, tuple, np.ndarray)):
@@ -138,6 +163,62 @@ def draw_painting_joint(box, img, label_array, score=0.5, color=None, config=Non
cv2.putText(img, label, p3, 0, config[3], [225, 255, 255], thickness=config[4], lineType=cv2.LINE_AA)
return img, box

# dynamic label rendering (per-target name)
def draw_name_joint(box, img, label_array_dict, score=0.5, color=None, config=None, name=""):
label_array = None
for zh in name:
if zh in label_array_dict:
if label_array is None:
label_array = label_array_dict[zh]
else:
label_array = np.concatenate((label_array,label_array_dict[zh]), axis= 1)
# height/width of the rendered label image
if label_array is None:
lh, lw = 0, 0
else:
lh, lw = label_array.shape[0:2]
# height and width of the frame
imh, imw = img.shape[0:2]
box = xywh2xyxy(box)
# top-left corner of the box
x0, y1 = box[0][0], box[0][1]
x1, y0 = x0 + lw, y1 - lh
# if y0 < 0 the label would run past the top edge
if y0 < 0:
y0 = 0
# push y1 down to the label height
y1 = y0 + lh
# if y1 runs past the bottom of the frame
if y1 > imh:
# clamp y1 to the frame height
y1 = imh
# and set y0 to y1 minus the label height
y0 = y1 - lh
# if x0 < 0, clamp to the left edge
if x0 < 0:
x0 = 0
x1 = x0 + lw
if x1 > imw:
x1 = imw
x0 = x1 - lw
tl = config[0]
box1 = np.asarray(box, np.int32)
cv2.polylines(img, [box1], True, color, tl)
if label_array is not None:
img[y0:y1, x0:x1, :] = label_array
pts_cls = [(x0, y0), (x1, y1)]
# draw the numeric score next to the label
# tl = max(int(round(imw / 1920 * 3)), 1) or round(0.002 * (imh + imw) / 2) + 1
label = ' %.2f' % score
t_size = (config[1], config[2])
# if socre_location=='leftTop':
p1, p2 = (pts_cls[1][0], pts_cls[0][1]), (pts_cls[1][0] + t_size[0], pts_cls[1][1])
cv2.rectangle(img, p1, p2, color, -1, cv2.LINE_AA)
p3 = pts_cls[1][0], pts_cls[1][1] - (lh - t_size[1]) // 2
cv2.putText(img, label, p3, 0, config[3], [225, 255, 255], thickness=config[4], lineType=cv2.LINE_AA)
return img, box


def filterBox(det0, det1, pix_dis):
# det0 is an (m1, 11) matrix
# det1 is an (m2, 12) matrix
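
To close out the PlotsUtils.py changes: get_label_array_dict pre-renders one fixed-size image per character (unify=True keeps all arrays the same size), and draw_name_joint builds an arbitrary label by concatenating those per-character arrays horizontally. A minimal, self-contained sketch of that concatenation step, using dummy 2x2 arrays in place of the real rendered glyphs:

import numpy as np

glyphs = {ch: np.full((2, 2, 3), i, dtype=np.uint8) for i, ch in enumerate("施工中")}

name = "施工"
strip = None
for ch in name:
    if ch in glyphs:
        piece = glyphs[ch]
        strip = piece if strip is None else np.concatenate((strip, piece), axis=1)
print(None if strip is None else strip.shape)   # (2, 4, 3): two glyphs side by side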

BIN  util/__pycache__/AliyunSdk.cpython-38.pyc
BIN  util/__pycache__/CpuUtils.cpython-38.pyc
BIN  util/__pycache__/Cv2Utils.cpython-38.pyc
BIN  util/__pycache__/FileUtils.cpython-38.pyc
BIN  util/__pycache__/GPUtils.cpython-38.pyc
BIN  util/__pycache__/ImageUtils.cpython-38.pyc
BIN  util/__pycache__/ImgBaiduSdk.cpython-38.pyc
BIN  util/__pycache__/KafkaUtils.cpython-38.pyc
BIN  util/__pycache__/LogUtils.cpython-38.pyc
BIN  util/__pycache__/ModelUtils.cpython-38.pyc
BIN  util/__pycache__/ModelUtils2.cpython-38.pyc
BIN  util/__pycache__/OcrBaiduSdk.cpython-38.pyc
BIN  util/__pycache__/PlotsUtils.cpython-38.pyc
BIN  util/__pycache__/PushStreamUtils.cpython-38.pyc
BIN  util/__pycache__/QueUtil.cpython-38.pyc
BIN  util/__pycache__/RWUtils.cpython-38.pyc
BIN  util/__pycache__/TimeUtils.cpython-38.pyc
BIN  util/__pycache__/TorchUtils.cpython-38.pyc
BIN  util/__pycache__/__init__.cpython-38.pyc
BIN  vodsdk/__pycache__/AliyunVodUploader.cpython-38.pyc
BIN  vodsdk/__pycache__/AliyunVodUtils.cpython-38.pyc
BIN  vodsdk/__pycache__/UploadVideoRequest.cpython-38.pyc
BIN  vodsdk/__pycache__/__init__.cpython-38.pyc

