2025firstedition

This commit is contained in:
th 2025-01-21 09:26:34 +08:00
commit f47a35d790
351 changed files with 31012 additions and 0 deletions

0
__init__.py Normal file
View File

Binary file not shown.

443
common/Constant.py Normal file
View File

@ -0,0 +1,443 @@
# -*- coding: utf-8 -*-
# Text encoding used throughout the service.
UTF_8 = "utf-8"
# File open mode: read-only.
R = 'r'
# Filename fragments marking the original ("or") and AI-annotated streams.
ON_OR = "_on_or_"
ON_AI = "_on_ai_"
# Video container suffix.
MP4 = ".mp4"
# Initial progress value (0%).
# NOTE(review): identifier spells "progess" (sic) — kept as-is because
# external callers may reference the name.
init_progess = "0.0000"
# Final progress value (100%).
success_progess = "1.0000"
# Width limit for pulled frames: frames wider than 1400 px are halved,
# narrower frames are left unchanged (per original comment).
width = 1400
# Colour palette for drawing detection boxes, one [B, G, R] triple per class
# (OpenCV channel order presumed from cv2 usage elsewhere — confirm).
COLOR = (
[0, 0, 255],
[255, 0, 0],
[211, 0, 148],
[0, 127, 0],
[0, 69, 255],
[0, 255, 0],
[255, 0, 255],
[0, 0, 127],
[127, 0, 255],
[255, 129, 0],
[139, 139, 0],
[255, 255, 0],
[127, 255, 0],
[0, 127, 255],
[0, 255, 127],
[255, 127, 255],
[8, 101, 139],
[171, 130, 255],
[139, 112, 74],
[205, 205, 180])
# Analysis task type identifiers.
ONLINE = "online"
OFFLINE = "offline"
PHOTO = "photo"
RECORDING = "recording"
# ---------------------------------------------------------------------------
# Request-validation schemas (Cerberus-style dictionaries).
#
# The field specs below are produced by small factory functions so the many
# schemas that previously copy-pasted the same spec (notably the "models"
# list element, duplicated three times) stay in sync.  Each factory returns
# a fresh dict, so no schema aliases another and mutating one cannot affect
# the rest.
# ---------------------------------------------------------------------------


def _request_id_field():
    """Return a fresh spec for the mandatory alphanumeric request id (1-36 chars)."""
    return {
        'type': 'string',
        'required': True,
        'empty': False,
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    }


def _command_field(allowed):
    """Return a fresh spec for the mandatory ``command`` field restricted to *allowed*."""
    return {
        'type': 'string',
        'required': True,
        'allowed': allowed
    }


def _required_url_field():
    """Return a fresh spec for a required, non-empty URL string (max 255 chars)."""
    return {
        'type': 'string',
        'required': True,
        'empty': False,
        'maxlength': 255
    }


def _logo_url_field():
    """Return a fresh spec for the optional, nullable logo/watermark URL."""
    return {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    }


def _model_item_schema():
    """Return a fresh spec for one element of a ``models`` list.

    Each model entry carries the model code, the video/image switches and the
    list of detection categories, each with an optional per-category config.
    """
    return {
        'type': 'dict',
        'required': True,
        'schema': {
            "code": {
                'type': 'string',
                'required': True,
                'empty': False,
                'dependencies': "categories",
                'regex': r'^[a-zA-Z0-9]{1,255}$'
            },
            "is_video": {
                'type': 'string',
                'required': True,
                'empty': False,
                'dependencies': "code",
                'allowed': ["0", "1"]
            },
            "is_image": {
                'type': 'string',
                'required': True,
                'empty': False,
                'dependencies': "code",
                'allowed': ["0", "1"]
            },
            "categories": {
                'type': 'list',
                'required': True,
                'dependencies': "code",
                'schema': {
                    'type': 'dict',
                    'required': True,
                    'schema': {
                        # NOTE(review): {0,255} admits the empty string, which
                        # 'empty': False then rejects — kept as in the original.
                        "id": {
                            'type': 'string',
                            'required': True,
                            'empty': False,
                            'regex': r'^[a-zA-Z0-9]{0,255}$'},
                        "config": {
                            'type': 'dict',
                            'required': False,
                            'dependencies': "id",
                        }
                    }
                }
            }
        }
    }


# Start an online (live-stream) analysis task.
ONLINE_START_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["start"]),
    "pull_url": _required_url_field(),
    "push_url": _required_url_field(),
    "logo_url": _logo_url_field(),
    "models": {
        'type': 'list',
        'required': True,
        'nullable': False,
        'minlength': 1,
        'maxlength': 3,
        'schema': _model_item_schema()
    }
}
# Stop an online analysis task.
ONLINE_STOP_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["stop"])
}
# Start an offline analysis task.
OFFLINE_START_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["start"]),
    "push_url": _required_url_field(),
    "pull_url": _required_url_field(),
    "logo_url": _logo_url_field(),
    "models": {
        'type': 'list',
        'required': True,
        'maxlength': 3,
        'minlength': 1,
        'schema': _model_item_schema()
    }
}
# Stop an offline analysis task.
OFFLINE_STOP_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["stop"])
}
# Start a still-image analysis task.
IMAGE_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["start"]),
    "logo_url": _logo_url_field(),
    "image_urls": {
        'type': 'list',
        'required': True,
        'minlength': 1,
        'schema': {
            'type': 'string',
            'required': True,
            'empty': False,
            'maxlength': 5000
        }
    },
    "models": {
        'type': 'list',
        'required': True,
        'schema': _model_item_schema()
    }
}
# Start a recording task; push_url is optional here and may be empty.
RECORDING_START_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["start"]),
    "pull_url": _required_url_field(),
    "push_url": {
        'type': 'string',
        'required': False,
        'empty': True,
        'maxlength': 255
    },
    "logo_url": _logo_url_field()
}
# Stop a recording task.
RECORDING_STOP_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["stop"])
}
# Start pull->push stream forwarding for a batch of videos.
PULL2PUSH_START_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["start"]),
    "video_urls": {
        'type': 'list',
        'required': True,
        'nullable': False,
        'schema': {
            'type': 'dict',
            'required': True,
            'schema': {
                "id": {
                    'type': 'string',
                    'required': True,
                    'empty': False,
                    'dependencies': "pull_url",
                    'regex': r'^[a-zA-Z0-9]{1,255}$'
                },
                "pull_url": {
                    'type': 'string',
                    'required': True,
                    'empty': False,
                    'dependencies': "push_url",
                    'regex': r'^(https|http|rtsp|rtmp|artc|webrtc|ws)://\w.+$'
                },
                "push_url": {
                    'type': 'string',
                    'required': True,
                    'empty': False,
                    'dependencies': "id",
                    'regex': r'^(https|http|rtsp|rtmp|artc|webrtc|ws)://\w.+$'
                }
            }
        }
    }
}
# Stop pull->push forwarding.
# NOTE(review): "start" is also accepted here in the original definition —
# preserved as-is; confirm whether that is intentional.
PULL2PUSH_STOP_SCHEMA = {
    "request_id": _request_id_field(),
    "command": _command_field(["start", "stop"]),
    "video_ids": {
        'type': 'list',
        'required': False,
        'nullable': True,
        'schema': {
            'type': 'string',
            'required': True,
            'empty': False,
            'regex': r'^[a-zA-Z0-9]{1,255}$'
        }
    }
}

12
common/YmlConstant.py Normal file
View File

@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
# Path templates for per-environment YAML configuration files.
# The single %s placeholder is presumably the environment name (dev/test/prod)
# — TODO confirm against the code that formats these paths.
# Service configuration path.
service_yml_path = 'config/service/dsp_%s_service.yml'
# Kafka configuration path.
kafka_yml_path = 'config/kafka/dsp_%s_kafka.yml'
# Aliyun configuration path.
aliyun_yml_path = "config/aliyun/dsp_%s_aliyun.yml"
# Baidu configuration path.
baidu_yml_path = 'config/baidu/dsp_%s_baidu.yml'
# MinIO configuration path.
minio_yml_path = 'config/minio/dsp_%s_minio.yml'

0
common/__init__.py Normal file
View File

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,23 @@
from threading import Thread
from loguru import logger
class Common(Thread):
    """Worker thread that runs a two-argument callable and exposes its result.

    Usage: construct with the callable and both arguments, ``start()`` it,
    then call :meth:`get_result` to block until completion.
    """

    __slots__ = ('__func', '__param1', '__param2', '__result')

    def __init__(self, func, param1, param2):
        super().__init__()
        # Stash the callable and its arguments; the result stays None until
        # run() has finished.
        self.__func, self.__param1, self.__param2 = func, param1, param2
        self.__result = None

    def get_result(self):
        """Wait for the thread to finish, then return what the callable returned."""
        self.join()
        return self.__result

    def run(self):
        logger.info("开始执行线程!")
        self.__result = self.__func(self.__param1, self.__param2)
        logger.info("线程停止完成!")

View File

@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
import time
from threading import Thread
from traceback import format_exc
from loguru import logger
from util.KafkaUtils import CustomerKafkaProducer
'''
问题反馈线程
'''
class FeedbackThread(Thread):
    """Feedback worker: drains the feedback queue and publishes each payload
    to the Kafka topic matching its kind (algorithm result, recording result,
    or push-stream result)."""

    __slots__ = ('__fbQueue', '__kafka_config')

    def __init__(self, fbQueue, kafka_config):
        # fbQueue: queue of dicts, each carrying optional "feedback",
        # "recording" and/or "pull_stream" payloads.
        # kafka_config: dict with a "topic" sub-dict naming the target topics.
        super().__init__()
        self.__fbQueue = fbQueue
        self.__kafka_config = kafka_config
    '''
    阻塞获取反馈消息
    '''
    # Blocking read of the next feedback message from the queue.
    def getFeedback(self):
        return self.__fbQueue.get()

    def run(self):
        logger.info("启动问题反馈线程")
        kafkaProducer = CustomerKafkaProducer(self.__kafka_config)
        # Resolve the three destination topics once, outside the loop.
        dsp_alg_results_topic = self.__kafka_config["topic"]["dsp-alg-results-topic"]
        dsp_recording_result_topic = self.__kafka_config["topic"]["dsp-recording-result-topic"]
        dsp_push_stream_result_topic = self.__kafka_config["topic"]["dsp-push-stream-result-topic"]
        while True:
            logger.info("问题反馈发送消息循环")
            # Reset per-iteration payloads so the except block can tell which
            # message (if any) was in flight when a failure occurred; each is
            # cleared again right after a successful send.
            feedback = None
            recording = None
            pull_stream = None
            try:
                fb = self.getFeedback()
                if fb is not None and len(fb) > 0:
                    feedback = fb.get("feedback")
                    recording = fb.get("recording")
                    pull_stream = fb.get("pull_stream")
                    if feedback is not None and len(feedback) > 0:
                        # Trailing 1: presumably a send mode/flag of
                        # CustomerKafkaProducer.sender — TODO confirm.
                        kafkaProducer.sender(dsp_alg_results_topic, feedback["request_id"], feedback, 1)
                        feedback = None
                    if recording is not None and len(recording) > 0:
                        kafkaProducer.sender(dsp_recording_result_topic, recording["request_id"], recording, 1)
                        recording = None
                    if pull_stream is not None and len(pull_stream) > 0:
                        kafkaProducer.sender(dsp_push_stream_result_topic, pull_stream["request_id"], pull_stream, 1)
                        pull_stream = None
                else:
                    time.sleep(1)
            except Exception:
                # Log with the request id of whichever payload was being sent.
                if feedback and feedback.get("request_id"):
                    logger.error("问题反馈异常:{}, requestId:{}", format_exc(), feedback.get("request_id"))
                elif recording and recording.get("request_id"):
                    logger.error("问题反馈异常:{}, requestId:{}", format_exc(), recording.get("request_id"))
                elif pull_stream and pull_stream.get("request_id"):
                    logger.error("问题反馈异常:{}, requestId:{}", format_exc(), pull_stream.get("request_id"))
                else:
                    logger.error("问题反馈异常:{}", format_exc())
        # NOTE(review): unreachable — the while-loop above never breaks.
        logger.info("问题反馈线程执行完成")

View File

@ -0,0 +1,360 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from threading import Thread
from time import sleep, time
from traceback import format_exc
from loguru import logger
import cv2
from entity.FeedBack import message_feedback
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.AliyunSdk import AliyunOssSdk
from util.MinioSdk import MinioSdk
from util import TimeUtils
from enums.AnalysisStatusEnum import AnalysisStatus
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue, get_no_block_queue, clear_queue
import io
class FileUpload(Thread):
    """Base class for image-upload worker threads.

    Positional args (in order): feedback queue, context dict, task message,
    image queue, analyse type.  ``storage_source`` selects the backend:
    1 -> MinIO, anything else -> Aliyun OSS.
    """
    __slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg')

    def __init__(self, *args):
        super().__init__()
        self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
        self._storage_source = self._context['service']['storage_source']


class ImageFileUpload(FileUpload):
    """Uploads original (OR) and AI-annotated frames for stream-analysis tasks
    and queues one feedback message per uploaded AI image."""
    __slots__ = ()

    @staticmethod
    def handle_image(frame_msg, frame_step):
        """Render detections onto copies of the frame.

        frame_msg layout: (det_xywh, frame, current_frame, all_frames, font_config)
        det_xywh: {model_code: {cls: [[detect_target_code, box, score,
                                       label_array, color, ...]]}}

        Returns a dict with the original frame, one annotated frame per
        (model, class) pair, and the frame-index window, or None when there
        is nothing to upload.
        """
        det_xywh, frame, current_frame, all_frames, font_config = frame_msg
        model_info = []
        for code, det_list in det_xywh.items():
            if len(det_list) > 0:
                for cls, target_list in det_list.items():
                    if len(target_list) > 0:
                        # Annotate a copy so each (model, class) gets its own image.
                        aFrame = frame.copy()
                        for target in target_list:
                            draw_painting_joint(target[1], aFrame, target[3], target[2],
                                                target[4], font_config, target[5])
                        model_info.append({"modelCode": str(code),
                                           "detectTargetCode": str(cls),
                                           "aFrame": aFrame})
        if len(model_info) > 0:
            return {
                "or_frame": frame,
                "model_info": model_info,
                "current_frame": current_frame,
                "last_frame": current_frame + frame_step
            }
        return None

    def run(self):
        msg, context = self._msg, self._context
        service = context["service"]
        base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
        logger.info("启动图片上传线程, requestId: {}", request_id)
        image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
        service_timeout = int(service["timeout"])
        # Upload window: configured filter step plus a 120-frame margin.
        frame_step = int(service["filter"]["frame_step"]) + 120
        try:
            with ThreadPoolExecutor(max_workers=2) as t:
                # Initialise the object-storage client once for the thread.
                if self._storage_source == 1:
                    minioSdk = MinioSdk(base_dir, env, request_id)
                else:
                    aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
                start_time = time()
                while True:
                    try:
                        if time() - start_time > service_timeout:
                            # Fixed: the original raised ServiceException on the line
                            # after this break — unreachable dead code, removed.
                            logger.error("图片上传线程运行超时, requestId: {}", request_id)
                            break
                        # Tagged messages: (2, 'stop') ends the thread,
                        # (1, frame_msg) is a frame batch to upload.
                        image_msg = get_no_block_queue(image_queue)
                        if image_msg is not None:
                            if image_msg[0] == 2:
                                if 'stop' == image_msg[1]:
                                    logger.info("开始停止图片上传线程, requestId:{}", request_id)
                                    break
                            if image_msg[0] == 1:
                                image_result = self.handle_image(image_msg[1], frame_step)
                                if image_result is not None:
                                    task = []
                                    or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
                                    or_image_name = build_image_name(image_result["current_frame"],
                                                                     image_result["last_frame"],
                                                                     analyse_type,
                                                                     "OR", "0", "0", request_id)
                                    if self._storage_source == 1:
                                        or_future = t.submit(minioSdk.put_object, or_image, or_image_name)
                                    else:
                                        or_future = t.submit(aliyunOssSdk.put_object, or_image_name,
                                                             or_image.tobytes())
                                    task.append(or_future)
                                    model_info_list = image_result["model_info"]
                                    msg_list = []
                                    for model_info in model_info_list:
                                        ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
                                        ai_image_name = build_image_name(image_result["current_frame"],
                                                                         image_result["last_frame"],
                                                                         analyse_type,
                                                                         "AI",
                                                                         model_info["modelCode"],
                                                                         model_info["detectTargetCode"],
                                                                         request_id)
                                        if self._storage_source == 1:
                                            ai_future = t.submit(minioSdk.put_object, ai_image, ai_image_name)
                                        else:
                                            ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
                                                                 ai_image.tobytes())
                                        task.append(ai_future)
                                    # Wait for every upload; put_object returns the remote
                                    # path (task[0] is the OR image, the rest line up with
                                    # model_info_list in order).
                                    remote_image_list = []
                                    for tk in task:
                                        remote_image_list.append(tk.result())
                                    for ii, model_info in enumerate(model_info_list):
                                        msg_list.append(message_feedback(request_id,
                                                                         AnalysisStatus.RUNNING.value,
                                                                         analyse_type, "", "", "",
                                                                         remote_image_list[0],
                                                                         remote_image_list[ii + 1],
                                                                         model_info['modelCode'],
                                                                         model_info['detectTargetCode']))
                                    # Loop variable renamed (was `msg`) so the task message
                                    # bound at the top of run() is not clobbered.
                                    for fb_msg in msg_list:
                                        put_queue(fb_queue, fb_msg, timeout=2, is_ex=False)
                                    del task, msg_list
                        else:
                            sleep(1)
                        del image_msg
                    except Exception:
                        logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
        finally:
            logger.info("停止图片上传线程0, requestId:{}", request_id)
            clear_queue(image_queue)
            logger.info("停止图片上传线程1, requestId:{}", request_id)
def build_image_name(*args):
    """Build the remote object name for an uploaded image.

    Layout: {requestId}/{time_now}_frame-{current_frame}-{last_frame}
            _type_{random_num}-{mode_type}-{modeCode}-{target}_{image_type}.jpg
    """
    current_frame, last_frame, mode_type, image_type, modeCode, target, request_id = args
    # Millisecond timestamp doubles as a uniqueness token.
    random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
    time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
    return (f"{request_id}/{time_now}_frame-{current_frame}-{last_frame}"
            f"_type_{random_num}-{mode_type}-{modeCode}-{target}_{image_type}.jpg")
class ImageTypeImageFileUpload(Thread):
    """Uploads original and AI-annotated images for still-image analysis tasks
    and queues one feedback message per uploaded AI image.

    Positional args (in order): feedback queue, context dict, task message,
    image queue, analyse type.
    """
    __slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg')

    def __init__(self, *args):
        super().__init__()
        self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
        # 1 -> MinIO backend, anything else -> Aliyun OSS.
        self._storage_source = self._context['service']['storage_source']

    @staticmethod
    def handle_image(det_xywh, copy_frame, font_config):
        """Render detections onto copies of copy_frame.

        det_xywh: {model_code: {cls: [[detect_target_code, box, score,
                                       label_array, color]]}}

        Returns a dict with the original frame and one annotated frame per
        (model, class) pair, or None when there is nothing to upload.
        """
        model_info = []
        for code, det_info in det_xywh.items():
            if det_info is not None and len(det_info) > 0:
                for cls, target_list in det_info.items():
                    if target_list is not None and len(target_list) > 0:
                        # Annotate a copy so each (model, class) gets its own image.
                        aiFrame = copy_frame.copy()
                        for target in target_list:
                            draw_painting_joint(target[1], aiFrame, target[3], target[2],
                                                target[4], font_config)
                        model_info.append({
                            "modelCode": str(code),
                            "detectTargetCode": str(cls),
                            "frame": aiFrame
                        })
        if len(model_info) > 0:
            return {
                "or_frame": copy_frame,
                "model_info": model_info,
                "current_frame": 0,
                "last_frame": 0
            }
        return None

    def run(self):
        context, msg = self._context, self._msg
        base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
        logger.info("启动图片识别图片上传线程, requestId: {}", request_id)
        image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
        service_timeout = int(context["service"]["timeout"])
        with ThreadPoolExecutor(max_workers=2) as t:
            try:
                # Initialise the object-storage client once for the thread.
                if self._storage_source == 1:
                    minioSdk = MinioSdk(base_dir, env, request_id)
                else:
                    aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
                start_time = time()
                while True:
                    try:
                        if time() - start_time > service_timeout:
                            logger.error("图片上传进程运行超时, requestId: {}", request_id)
                            break
                        # NOTE(review): blocking get — a quiet queue delays the
                        # timeout check above; confirm a stop sentinel is always
                        # sent on shutdown.
                        image_msg = image_queue.get()
                        if image_msg is not None:
                            if image_msg[0] == 2:
                                if 'stop' == image_msg[1]:
                                    logger.info("开始停止图片上传线程, requestId:{}", request_id)
                                    break
                            if image_msg[0] == 1:
                                task, msg_list = [], []
                                det_xywh, image_url, copy_frame, font_config, result = image_msg[1]
                                remote_names = []
                                if det_xywh is None:
                                    # No detections: copy_frame is uploaded as-is as the AI image.
                                    ai_image_name = build_image_name(0, 0, analyse_type, "AI",
                                                                     result.get("modelCode"),
                                                                     result.get("type"), request_id)
                                    if self._storage_source == 1:
                                        ai_future = t.submit(minioSdk.put_object, copy_frame, ai_image_name)
                                    else:
                                        ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name, copy_frame)
                                    task.append(ai_future)
                                    remote_names.append(ai_image_name)
                                else:
                                    image_result = self.handle_image(det_xywh, copy_frame, font_config)
                                    if image_result:
                                        if image_url is None:
                                            # No source URL given: upload the original frame too.
                                            or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame"))
                                            image_url_0 = build_image_name(image_result.get("current_frame"),
                                                                           image_result.get("last_frame"),
                                                                           analyse_type,
                                                                           "OR", "0", "O", request_id)
                                            if self._storage_source == 1:
                                                or_future = t.submit(minioSdk.put_object, or_image, image_url_0)
                                            else:
                                                or_future = t.submit(aliyunOssSdk.put_object, image_url_0,
                                                                     or_image.tobytes())
                                            task.append(or_future)
                                            remote_names.append(image_url_0)
                                        model_info_list = image_result.get("model_info")
                                        for model_info in model_info_list:
                                            ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame"))
                                            ai_image_name = build_image_name(image_result.get("current_frame"),
                                                                             image_result.get("last_frame"),
                                                                             analyse_type,
                                                                             "AI",
                                                                             model_info.get("modelCode"),
                                                                             model_info.get("detectTargetCode"),
                                                                             request_id)
                                            if self._storage_source == 1:
                                                ai_future = t.submit(minioSdk.put_object, ai_image, ai_image_name)
                                            else:
                                                ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
                                                                     ai_image.tobytes())
                                            task.append(ai_future)
                                            remote_names.append(ai_image_name)
                                # Wait for all uploads; put_object returns the remote
                                # (full) path of the stored object.
                                remote_url_list = []
                                for thread_result in task:
                                    remote_url_list.append(thread_result.result())
                                if det_xywh is None:
                                    msg_list.append(message_feedback(request_id,
                                                                     AnalysisStatus.RUNNING.value,
                                                                     analyse_type, "", "", "",
                                                                     image_url,
                                                                     remote_url_list[0],
                                                                     result.get("modelCode"),
                                                                     result.get("type"),
                                                                     analyse_results=result))
                                else:
                                    if image_result:
                                        model_info_list = image_result.get("model_info")
                                        # Fixed: each message now pairs the AI image with ITS
                                        # model entry — the original reused the leaked last
                                        # `model_info` loop variable for every message.
                                        if image_url is None:
                                            # remote_url_list[0] is the OR image; AI images
                                            # follow in model_info_list order.
                                            for ii, model_info in enumerate(model_info_list):
                                                msg_list.append(message_feedback(request_id,
                                                                                 AnalysisStatus.RUNNING.value,
                                                                                 analyse_type, "", "", "",
                                                                                 remote_url_list[0],
                                                                                 remote_url_list[1 + ii],
                                                                                 model_info.get('modelCode'),
                                                                                 model_info.get('detectTargetCode'),
                                                                                 analyse_results=result))
                                        else:
                                            for ii, model_info in enumerate(model_info_list):
                                                msg_list.append(message_feedback(request_id,
                                                                                 AnalysisStatus.RUNNING.value,
                                                                                 analyse_type, "", "", "",
                                                                                 image_url,
                                                                                 remote_url_list[ii],
                                                                                 model_info.get('modelCode'),
                                                                                 model_info.get('detectTargetCode'),
                                                                                 analyse_results=result))
                                for fb_msg in msg_list:
                                    put_queue(fb_queue, fb_msg, timeout=2, is_ex=False)
                        else:
                            sleep(1)
                    except Exception:
                        logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
            finally:
                clear_queue(image_queue)
                logger.info("停止图片识别图片上传线程, requestId:{}", request_id)

View File

@ -0,0 +1,305 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from threading import Thread
from time import sleep, time
from traceback import format_exc
from loguru import logger
import cv2
from entity.FeedBack import message_feedback
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.AliyunSdk import AliyunOssSdk
from util.MinioSdk import MinioSdk
from util import TimeUtils
from enums.AnalysisStatusEnum import AnalysisStatus
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue, get_no_block_queue, clear_queue
import io
class FileUpload(Thread):
    """Base class for image-upload worker threads.

    Positional args (in order): feedback queue, context dict, task message,
    image queue, analyse type.  ``storage_source`` selects the backend:
    1 -> MinIO, anything else -> Aliyun OSS.
    """
    __slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg')

    def __init__(self, *args):
        super().__init__()
        self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
        self._storage_source = self._context['service']['storage_source']


class ImageFileUpload(FileUpload):
    """Uploads original (OR) and AI-annotated frames for stream-analysis tasks
    and queues one feedback message per uploaded AI image."""
    __slots__ = ()

    @staticmethod
    def handle_image(frame_msg, frame_step):
        """Render detections onto copies of the frame.

        frame_msg layout: (det_xywh, frame, current_frame, all_frames, font_config)
        det_xywh: {model_code: {cls: [[detect_target_code, box, score,
                                       label_array, color, ...]]}}

        Returns a dict with the original frame, one annotated frame per
        (model, class) pair, and the frame-index window, or None when there
        is nothing to upload.
        """
        det_xywh, frame, current_frame, all_frames, font_config = frame_msg
        model_info = []
        for code, det_list in det_xywh.items():
            if len(det_list) > 0:
                for cls, target_list in det_list.items():
                    if len(target_list) > 0:
                        # Annotate a copy so each (model, class) gets its own image.
                        aFrame = frame.copy()
                        for target in target_list:
                            draw_painting_joint(target[1], aFrame, target[3], target[2],
                                                target[4], font_config, target[5])
                        model_info.append({"modelCode": str(code),
                                           "detectTargetCode": str(cls),
                                           "aFrame": aFrame})
        if len(model_info) > 0:
            return {
                "or_frame": frame,
                "model_info": model_info,
                "current_frame": current_frame,
                "last_frame": current_frame + frame_step
            }
        return None

    def run(self):
        msg, context = self._msg, self._context
        service = context["service"]
        base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
        logger.info("启动图片上传线程, requestId: {}", request_id)
        image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
        service_timeout = int(service["timeout"])
        # Upload window: configured filter step plus a 120-frame margin.
        frame_step = int(service["filter"]["frame_step"]) + 120
        try:
            with ThreadPoolExecutor(max_workers=2) as t:
                # Initialise the object-storage client once for the thread.
                if self._storage_source == 1:
                    minioSdk = MinioSdk(base_dir, env, request_id)
                else:
                    aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
                start_time = time()
                while True:
                    try:
                        if time() - start_time > service_timeout:
                            # Fixed: the original raised ServiceException on the line
                            # after this break — unreachable dead code, removed.
                            logger.error("图片上传线程运行超时, requestId: {}", request_id)
                            break
                        # Tagged messages: (2, 'stop') ends the thread,
                        # (1, frame_msg) is a frame batch to upload.
                        image_msg = get_no_block_queue(image_queue)
                        if image_msg is not None:
                            if image_msg[0] == 2:
                                if 'stop' == image_msg[1]:
                                    logger.info("开始停止图片上传线程, requestId:{}", request_id)
                                    break
                            if image_msg[0] == 1:
                                image_result = self.handle_image(image_msg[1], frame_step)
                                if image_result is not None:
                                    task = []
                                    or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
                                    or_image_name = build_image_name(image_result["current_frame"],
                                                                     image_result["last_frame"],
                                                                     analyse_type,
                                                                     "OR", "0", "0", request_id)
                                    if self._storage_source == 1:
                                        or_future = t.submit(minioSdk.put_object, or_image, or_image_name)
                                    else:
                                        or_future = t.submit(aliyunOssSdk.put_object, or_image_name,
                                                             or_image.tobytes())
                                    task.append(or_future)
                                    model_info_list = image_result["model_info"]
                                    msg_list = []
                                    for model_info in model_info_list:
                                        ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
                                        ai_image_name = build_image_name(image_result["current_frame"],
                                                                         image_result["last_frame"],
                                                                         analyse_type,
                                                                         "AI",
                                                                         model_info["modelCode"],
                                                                         model_info["detectTargetCode"],
                                                                         request_id)
                                        if self._storage_source == 1:
                                            ai_future = t.submit(minioSdk.put_object, ai_image, ai_image_name)
                                        else:
                                            ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
                                                                 ai_image.tobytes())
                                        task.append(ai_future)
                                        # Feedback references the object names directly
                                        # (this variant does not use put_object's return).
                                        msg_list.append(message_feedback(request_id,
                                                                         AnalysisStatus.RUNNING.value,
                                                                         analyse_type, "", "", "",
                                                                         or_image_name,
                                                                         ai_image_name,
                                                                         model_info['modelCode'],
                                                                         model_info['detectTargetCode']))
                                    # Wait for all uploads to finish before reporting.
                                    for tk in task:
                                        tk.result()
                                    # Loop variable renamed (was `msg`) so the task message
                                    # bound at the top of run() is not clobbered.
                                    for fb_msg in msg_list:
                                        put_queue(fb_queue, fb_msg, timeout=2, is_ex=False)
                                    del task, msg_list
                        else:
                            sleep(1)
                        del image_msg
                    except Exception:
                        logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
        finally:
            logger.info("停止图片上传线程0, requestId:{}", request_id)
            clear_queue(image_queue)
            logger.info("停止图片上传线程1, requestId:{}", request_id)
def build_image_name(*args):
    """Build the remote object name for an uploaded image.

    Layout: {requestId}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}" \
    "-{modeCode}-{target}_{image_type}.jpg
    """
    current_frame, last_frame, mode_type, image_type, modeCode, target, request_id = args
    # Millisecond timestamp used as a uniqueness token in the name.
    random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
    time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
    return "%s/%s_frame-%s-%s_type_%s-%s-%s-%s_%s.jpg" % (request_id, time_now, current_frame, last_frame,
                                                          random_num, mode_type, modeCode, target, image_type)
class ImageTypeImageFileUpload(Thread):
    """Uploads original and AI-annotated images for still-image analysis tasks
    and queues one feedback message per uploaded AI image.

    Positional args (in order): feedback queue, context dict, task message,
    image queue, analyse type.
    """
    __slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg')

    def __init__(self, *args):
        super().__init__()
        self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
        # 1 -> MinIO backend, anything else -> Aliyun OSS.
        self._storage_source = self._context['service']['storage_source']

    @staticmethod
    def handle_image(det_xywh, copy_frame, font_config):
        """Render detections onto copies of copy_frame.

        det_xywh: {model_code: {cls: [[detect_target_code, box, score,
                                       label_array, color]]}}

        Returns a dict with the original frame and one annotated frame per
        (model, class) pair, or None when there is nothing to upload.
        """
        model_info = []
        # Parse detections grouped by model code, then by class.
        for code, det_info in det_xywh.items():
            if det_info is not None and len(det_info) > 0:
                for cls, target_list in det_info.items():
                    if target_list is not None and len(target_list) > 0:
                        # Annotate a copy so each (model, class) gets its own image.
                        aiFrame = copy_frame.copy()
                        for target in target_list:
                            draw_painting_joint(target[1], aiFrame, target[3], target[2], target[4], font_config)
                        model_info.append({
                            "modelCode": str(code),
                            "detectTargetCode": str(cls),
                            "frame": aiFrame
                        })
        if len(model_info) > 0:
            image_result = {
                "or_frame": copy_frame,
                "model_info": model_info,
                "current_frame": 0,
                "last_frame": 0
            }
            return image_result
        return None

    def run(self):
        context, msg = self._context, self._msg
        base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
        logger.info("启动图片识别图片上传线程, requestId: {}", request_id)
        image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
        service_timeout = int(context["service"]["timeout"])
        with ThreadPoolExecutor(max_workers=2) as t:
            try:
                # Initialise the object-storage client once for the thread.
                if self._storage_source==1:
                    minioSdk = MinioSdk(base_dir, env, request_id )
                else:
                    aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
                start_time = time()
                while True:
                    try:
                        if time() - start_time > service_timeout:
                            logger.error("图片上传进程运行超时, requestId: {}", request_id)
                            break
                        # NOTE(review): blocking get — a quiet queue delays the timeout
                        # check above; confirm a stop sentinel is always sent on shutdown.
                        image_msg = image_queue.get()
                        if image_msg is not None:
                            # Tagged messages: (2, 'stop') ends the thread,
                            # (1, payload) is an image to upload.
                            if image_msg[0] == 2:
                                if 'stop' == image_msg[1]:
                                    logger.info("开始停止图片上传线程, requestId:{}", request_id)
                                    break
                            if image_msg[0] == 1:
                                task, msg_list = [], []
                                det_xywh, image_url, copy_frame, font_config, result = image_msg[1]
                                if det_xywh is None:
                                    # No detections: copy_frame is uploaded as-is as the AI image.
                                    ai_image_name = build_image_name(0, 0, analyse_type, "AI", result.get("modelCode"),
                                                                     result.get("type"), request_id)
                                    if self._storage_source==1:
                                        ai_future = t.submit(minioSdk.put_object, copy_frame,ai_image_name)
                                    else:
                                        ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name, copy_frame)
                                    task.append(ai_future)
                                    msg_list.append(message_feedback(request_id,
                                                                     AnalysisStatus.RUNNING.value,
                                                                     analyse_type, "", "", "",
                                                                     image_url,
                                                                     ai_image_name,
                                                                     result.get("modelCode"),
                                                                     result.get("type"),
                                                                     analyse_results=result))
                                else:
                                    image_result = self.handle_image(det_xywh, copy_frame, font_config)
                                    if image_result:
                                        # If no source URL was supplied, upload the original
                                        # frame too and use its object name as image_url.
                                        if image_url is None:
                                            or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame"))
                                            image_url = build_image_name(image_result.get("current_frame"),
                                                                         image_result.get("last_frame"),
                                                                         analyse_type,
                                                                         "OR", "0", "O", request_id)
                                            if self._storage_source==1:
                                                or_future = t.submit(minioSdk.put_object, or_image,image_url)
                                            else:
                                                or_future = t.submit(aliyunOssSdk.put_object, image_url,
                                                                     or_image.tobytes())
                                            task.append(or_future)
                                        model_info_list = image_result.get("model_info")
                                        for model_info in model_info_list:
                                            ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame"))
                                            ai_image_name = build_image_name(image_result.get("current_frame"),
                                                                             image_result.get("last_frame"),
                                                                             analyse_type,
                                                                             "AI",
                                                                             model_info.get("modelCode"),
                                                                             model_info.get("detectTargetCode"),
                                                                             request_id)
                                            if self._storage_source==1:
                                                ai_future = t.submit(minioSdk.put_object, ai_image, ai_image_name)
                                            else:
                                                ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
                                                                     ai_image.tobytes())
                                            task.append(ai_future)
                                            msg_list.append(message_feedback(request_id,
                                                                             AnalysisStatus.RUNNING.value,
                                                                             analyse_type, "", "", "",
                                                                             image_url,
                                                                             ai_image_name,
                                                                             model_info.get('modelCode'),
                                                                             model_info.get('detectTargetCode'),
                                                                             analyse_results=result))
                                # Wait for all uploads to complete before reporting.
                                for thread_result in task:
                                    thread_result.result()
                                for msg in msg_list:
                                    put_queue(fb_queue, msg, timeout=2, is_ex=False)
                        else:
                            sleep(1)
                    except Exception as e:
                        logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
            finally:
                clear_queue(image_queue)
                logger.info("停止图片识别图片上传线程, requestId:{}", request_id)

View File

@ -0,0 +1,315 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from threading import Thread
from time import sleep, time
from traceback import format_exc
from loguru import logger
import cv2
from entity.FeedBack import message_feedback
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.AliyunSdk import AliyunOssSdk
from util.MinioSdk import MinioSdk
from util import TimeUtils
from enums.AnalysisStatusEnum import AnalysisStatus
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue, get_no_block_queue, clear_queue
import io
class FileUpload(Thread):
    # Base class for image-upload worker threads.
    # Positional args (in order): feedback queue, context dict, task message,
    # image queue, analyse type.  storage_source: 1 -> MinIO, otherwise Aliyun OSS.
    __slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg')
    def __init__(self, *args):
        super().__init__()
        self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
        self._storage_source = self._context['service']['storage_source']
class ImageFileUpload(FileUpload):
__slots__ = ()
@staticmethod
def handle_image(frame_msg, frame_step):
    """Render detections onto copies of the frame.

    frame_msg layout: (det_xywh, frame, current_frame, all_frames, font_config)

    Returns a dict with the original frame, one annotated frame per
    (model, class) pair, and the frame-index window, or None when there
    is nothing to upload.
    """
    # (high_score_image["code"], all_frames, draw_config["font_config"])
    # high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list)
    det_xywh, frame, current_frame, all_frames, font_config = frame_msg
    '''
    det_xywh:{
    'code':{
    1: [[detect_targets_code, box, score, label_array, color]]
    }
    }
    模型编号modeCode
    检测目标detectTargetCode
    '''
    model_info = []
    # Parse detections grouped by model code, then by class.
    for code, det_list in det_xywh.items():
        if len(det_list) > 0:
            for cls, target_list in det_list.items():
                if len(target_list) > 0:
                    # Annotate a copy so each (model, class) gets its own image.
                    aFrame = frame.copy()
                    for target in target_list:
                        draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config, target[5])
                    model_info.append({"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame})
    if len(model_info) > 0:
        image_result = {
            "or_frame": frame,
            "model_info": model_info,
            "current_frame": current_frame,
            # Upload window ends frame_step frames after the current frame.
            "last_frame": current_frame + frame_step
        }
        return image_result
    return None
def run(self):
msg, context = self._msg, self._context
service = context["service"]
base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
logger.info("启动图片上传线程, requestId: {}", request_id)
image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
service_timeout = int(service["timeout"])
frame_step = int(service["filter"]["frame_step"]) + 120
try:
with ThreadPoolExecutor(max_workers=2) as t:
# 初始化oss客户端
if self._storage_source==1:
minioSdk = MinioSdk(base_dir, env, request_id )
else:
aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
start_time = time()
while True:
try:
if time() - start_time > service_timeout:
logger.error("图片上传线程运行超时, requestId: {}", request_id)
break
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
# 获取队列中的消息
image_msg = get_no_block_queue(image_queue)
if image_msg is not None:
if image_msg[0] == 2:
if 'stop' == image_msg[1]:
logger.info("开始停止图片上传线程, requestId:{}", request_id)
break
if image_msg[0] == 1:
image_result = self.handle_image(image_msg[1], frame_step)
if image_result is not None:
task = []
or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
or_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
analyse_type,
"OR", "0", "0", request_id)
if self._storage_source==1:
or_future = t.submit(minioSdk.put_object, or_image,or_image_name)
else:
or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
task.append(or_future)
model_info_list = image_result["model_info"]
msg_list = []
for model_info in model_info_list:
ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
ai_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
analyse_type,
"AI",
model_info["modelCode"],
model_info["detectTargetCode"],
request_id)
if self._storage_source==1:
ai_future = t.submit(minioSdk.put_object, ai_image,
ai_image_name)
else:
ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
ai_image.tobytes())
task.append(ai_future)
#msg_list.append(message_feedback(request_id,
# AnalysisStatus.RUNNING.value,
# analyse_type, "", "", "",
# or_image_name,
# ai_image_name,
# model_info['modelCode'],
# model_info['detectTargetCode']))
remote_image_list=[]
for tk in task:
remote_image_list.append( tk.result())
for ii,model_info in enumerate(model_info_list):
msg_list.append( message_feedback(request_id,
AnalysisStatus.RUNNING.value,
analyse_type, "", "", "",
remote_image_list[0],
remote_image_list[ii+1],
model_info['modelCode'],
model_info['detectTargetCode']) )
for msg in msg_list:
put_queue(fb_queue, msg, timeout=2, is_ex=False)
del task, msg_list
else:
sleep(1)
del image_msg
except Exception:
logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
finally:
logger.info("停止图片上传线程0, requestId:{}", request_id)
clear_queue(image_queue)
logger.info("停止图片上传线程1, requestId:{}", request_id)
def build_image_name(*args):
    """Build the object-storage key for an uploaded frame image.

    args: (current_frame, last_frame, mode_type, image_type, modeCode, target, request_id)

    Key layout:
        {requestId}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{modeCode}-{target}_{image_type}.jpg
    """
    current_frame, last_frame, mode_type, image_type, modeCode, target, request_id = args
    # Millisecond-resolution timestamp doubles as a uniqueness token.
    random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
    time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
    return (f"{request_id}/{time_now}_frame-{current_frame}-{last_frame}"
            f"_type_{random_num}-{mode_type}-{modeCode}-{target}_{image_type}.jpg")
class ImageTypeImageFileUpload(Thread):
    """Upload worker for image (picture) analysis tasks: draws detection
    boxes, uploads OR/AI images to MinIO/OSS and feeds back results together
    with the structured analysis result payload.
    """
    # '_storage_source' added: it is assigned in __init__ but was missing from
    # __slots__ (it silently landed in Thread's instance __dict__ before).
    __slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg', '_storage_source')

    def __init__(self, *args):
        """args: (fb_queue, context, msg, image_queue, analyse_type)."""
        super().__init__()
        self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
        # 1 -> MinIO backend, anything else -> Aliyun OSS (see run()).
        self._storage_source = self._context['service']['storage_source']

    @staticmethod
    def handle_image(det_xywh, copy_frame, font_config):
        """Render one annotated frame per (model, class) detection group.

        det_xywh: {model_code: {cls: [[target_code, box, score, label_array, color]]}}
        copy_frame: source image the boxes are drawn onto (copied per group).
        Returns a dict with the original frame and annotated frames, or None
        when there is nothing to draw.
        """
        model_info = []
        # Group detections by model code, then by detected target class.
        for code, det_info in det_xywh.items():
            if det_info is not None and len(det_info) > 0:
                for cls, target_list in det_info.items():
                    if target_list is not None and len(target_list) > 0:
                        aiFrame = copy_frame.copy()
                        for target in target_list:
                            draw_painting_joint(target[1], aiFrame, target[3], target[2], target[4], font_config)
                        model_info.append({
                            "modelCode": str(code),
                            "detectTargetCode": str(cls),
                            "frame": aiFrame
                        })
        if len(model_info) > 0:
            return {
                "or_frame": copy_frame,
                "model_info": model_info,
                # Still images have no frame counters.
                "current_frame": 0,
                "last_frame": 0
            }
        return None

    def run(self):
        """Consume analysed images until 'stop' or timeout, uploading them and
        reporting each upload on the feedback queue."""
        context, msg = self._context, self._msg
        base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
        logger.info("启动图片识别图片上传线程, requestId: {}", request_id)
        image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
        service_timeout = int(context["service"]["timeout"])
        with ThreadPoolExecutor(max_workers=2) as t:
            try:
                # Initialise the object-storage client (1 -> MinIO, else Aliyun OSS).
                if self._storage_source == 1:
                    minioSdk = MinioSdk(base_dir, env, request_id)
                else:
                    aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
                start_time = time()
                while True:
                    try:
                        if time() - start_time > service_timeout:
                            logger.error("图片上传进程运行超时, requestId: {}", request_id)
                            break
                        # NOTE(review): this blocks forever when the queue stays empty,
                        # unlike the sibling thread's non-blocking read — the timeout
                        # check above only fires after a message arrives. Confirm intent.
                        image_msg = image_queue.get()
                        if image_msg is not None:
                            # (2, 'stop') is the shutdown sentinel.
                            if image_msg[0] == 2:
                                if 'stop' == image_msg[1]:
                                    logger.info("开始停止图片上传线程, requestId:{}", request_id)
                                    break
                            if image_msg[0] == 1:
                                task, msg_list = [], []
                                det_xywh, image_url, copy_frame, font_config, result = image_msg[1]
                                if det_xywh is None:
                                    # Pre-encoded AI image: upload as-is.
                                    ai_image_name = build_image_name(0, 0, analyse_type, "AI", result.get("modelCode"),
                                                                     result.get("type"), request_id)
                                    if self._storage_source == 1:
                                        # NOTE: MinioSdk.put_object takes (data, name);
                                        # AliyunOssSdk.put_object takes (name, data).
                                        ai_future = t.submit(minioSdk.put_object, copy_frame, ai_image_name)
                                    else:
                                        ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name, copy_frame)
                                    task.append(ai_future)
                                    msg_list.append(message_feedback(request_id,
                                                                     AnalysisStatus.RUNNING.value,
                                                                     analyse_type, "", "", "",
                                                                     image_url,
                                                                     ai_image_name,
                                                                     result.get("modelCode"),
                                                                     result.get("type"),
                                                                     analyse_results=result))
                                else:
                                    image_result = self.handle_image(det_xywh, copy_frame, font_config)
                                    if image_result:
                                        # Upload the original frame only when no remote URL exists yet.
                                        if image_url is None:
                                            or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame"))
                                            image_url = build_image_name(image_result.get("current_frame"),
                                                                         image_result.get("last_frame"),
                                                                         analyse_type,
                                                                         "OR", "0", "O", request_id)
                                            if self._storage_source == 1:
                                                or_future = t.submit(minioSdk.put_object, or_image, image_url)
                                            else:
                                                or_future = t.submit(aliyunOssSdk.put_object, image_url,
                                                                     or_image.tobytes())
                                            task.append(or_future)
                                        model_info_list = image_result.get("model_info")
                                        # One annotated (AI) image per model/class group.
                                        for model_info in model_info_list:
                                            ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame"))
                                            ai_image_name = build_image_name(image_result.get("current_frame"),
                                                                             image_result.get("last_frame"),
                                                                             analyse_type,
                                                                             "AI",
                                                                             model_info.get("modelCode"),
                                                                             model_info.get("detectTargetCode"),
                                                                             request_id)
                                            if self._storage_source == 1:
                                                ai_future = t.submit(minioSdk.put_object, ai_image, ai_image_name)
                                            else:
                                                ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
                                                                     ai_image.tobytes())
                                            task.append(ai_future)
                                            msg_list.append(message_feedback(request_id,
                                                                             AnalysisStatus.RUNNING.value,
                                                                             analyse_type, "", "", "",
                                                                             image_url,
                                                                             ai_image_name,
                                                                             model_info.get('modelCode'),
                                                                             model_info.get('detectTargetCode'),
                                                                             analyse_results=result))
                                # Wait for all uploads before reporting.
                                for thread_result in task:
                                    thread_result.result()
                                for fb_msg in msg_list:
                                    put_queue(fb_queue, fb_msg, timeout=2, is_ex=False)
                        else:
                            sleep(1)
                    except Exception:
                        # Log and keep consuming: one bad image must not kill the thread.
                        logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
            finally:
                clear_queue(image_queue)
                logger.info("停止图片识别图片上传线程, requestId:{}", request_id)

View File

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
from threading import Thread
from time import sleep, time
from traceback import format_exc
from loguru import logger
from common.Constant import init_progess
from enums.AnalysisStatusEnum import AnalysisStatus
from entity.FeedBack import message_feedback
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.QueUtil import get_no_block_queue, put_queue, clear_queue
class Heartbeat(Thread):
    """Progress-reporting thread.

    Polls the heartbeat queue every 3 seconds for progress updates and the
    'stop' command, and pushes a RUNNING feedback message roughly every
    30 seconds. Raises (and logs) a timeout error if the task exceeds the
    configured service timeout plus a 2-minute grace period.
    """
    __slots__ = ('__fb_queue', '__hb_queue', '__request_id', '__analyse_type', "_context")

    def __init__(self, *args):
        # args: (fb_queue, hb_queue, request_id, analyse_type, context)
        super().__init__()
        self.__fb_queue, self.__hb_queue, self.__request_id, self.__analyse_type, self._context = args

    def run(self):
        req_id, beat_queue = self.__request_id, self.__hb_queue
        feedback_queue, task_type = self.__fb_queue, self.__analyse_type
        current_progress = init_progess
        # Hard deadline: configured service timeout plus a 2-minute grace period.
        deadline = int(self._context["service"]["timeout"]) + 120
        try:
            logger.info("开始启动心跳线程requestId:{}", req_id)
            started_at = time()
            elapsed_beats = 0
            while True:
                if time() - started_at > deadline:
                    logger.error("心跳运行超时, requestId: {}", req_id)
                    raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
                                           ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
                sleep(3)
                beat_msg = get_no_block_queue(beat_queue)
                if beat_msg is not None:
                    if 'stop' == beat_msg.get("command"):
                        logger.info("开始终止心跳线程, requestId:{}", req_id)
                        break
                    latest = beat_msg.get("hb_value")
                    if latest is not None:
                        current_progress = latest
                # Report roughly every 30 seconds (loop ticks every 3 seconds).
                if elapsed_beats % 30 == 0:
                    elapsed_beats = 0
                    put_queue(feedback_queue,
                              message_feedback(req_id, AnalysisStatus.RUNNING.value, task_type,
                                               progress=current_progress),
                              timeout=3, is_ex=True)
                elapsed_beats += 3
                del beat_msg
        except Exception:
            logger.error("心跳线程异常:{}, requestId:{}", format_exc(), req_id)
        finally:
            clear_queue(beat_queue)
            logger.info("心跳线程停止完成requestId:{}", req_id)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
import time
from traceback import format_exc
from multiprocessing import Process, Queue
from loguru import logger
from concurrency.Pull2PushStreamThread import PushSteamThread
from enums.StatusEnum import PushStreamStatus, ExecuteStatus
from util.LogUtils import init_log
from enums.ExceptionEnum import ExceptionType
from entity.FeedBack import pull_stream_feedback
from exception.CustomerException import ServiceException
from util.QueUtil import get_no_block_queue, put_queue
class PushStreamProcess(Process):
    """Parent process for pull-to-push stream forwarding.

    Spawns one PushSteamThread per video URL (max 5), then supervises them:
    reports per-video status roughly every 10 seconds, handles 'stop'
    commands from the event queue, enforces a 12-hour overall timeout, and
    tears everything down (with FAILED feedback) on any error.
    """
    __slots__ = ('_fb_queue', 'event_queue', '_context', '_msg', '_analysisType')
    def __init__(self, *args):
        # args: (fb_queue, context, msg, analysisType)
        super().__init__()
        self._fb_queue, self._context, self._msg, self._analysisType = args
        # Created in the parent; sendEvent() is called from the parent,
        # consumed inside run() in the child process.
        self.event_queue = Queue()
    def sendEvent(self, eBody):
        """Forward a control event (e.g. stop) to the child process.

        Raises ServiceException if the queue stays full for 2 seconds.
        """
        try:
            self.event_queue.put(eBody, timeout=2)
        except Exception:
            logger.error("添加事件到队列超时异常:{}, requestId:{}", format_exc(), self._msg["request_id"])
            raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                   ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
    def run(self):
        msg, context = self._msg, self._context
        requestId, videoUrls = msg["request_id"], msg["video_urls"]
        base_dir, env = context['base_dir'], context['env']
        fb_queue = self._fb_queue
        # task: videoId -> PushSteamThread; videoStatus: videoId -> status code.
        task, videoStatus = {}, {}
        ex = None
        try:
            init_log(base_dir, env)
            if videoUrls is None or len(videoUrls) == 0:
                raise ServiceException(ExceptionType.PUSH_STREAM_URL_IS_NULL.value[0],
                                       ExceptionType.PUSH_STREAM_URL_IS_NULL.value[1])
            if len(videoUrls) > 5:
                logger.error("推流数量超过限制, 当前推流数量: {}, requestId:{}", len(videoUrls), requestId)
                raise ServiceException(ExceptionType.PULL_STREAM_NUM_LIMIT_EXCEPTION.value[0],
                                       ExceptionType.PULL_STREAM_NUM_LIMIT_EXCEPTION.value[1])
            videoInfo = [{"id": url["id"], "status": PushStreamStatus.WAITING.value[0]} for url in videoUrls if
                         url.get("id")]
            # Initial WAITING feedback before any thread starts.
            put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.WAITING.value[0], "", "", videoInfo))
            # One forwarding thread per video URL.
            for videoUrl in videoUrls:
                pushThread = PushSteamThread(videoUrl["pull_url"], videoUrl["push_url"], requestId, videoUrl["id"])
                pushThread.start()
                task[videoUrl["id"]] = pushThread
            enable_time = time.time()
            for video in videoInfo:
                videoStatus[video.get("id")] = video.get("status")
            count = 0
            while True:
                # Overall timeout for the whole forwarding task (12 hours).
                if time.time() - enable_time > 43200:
                    logger.error("任务执行超时, requestId:{}", requestId)
                    for t in list(task.keys()):
                        if task[t].is_alive():
                            # status=False asks the thread to stop; closing the
                            # subprocess unblocks its communicate() call.
                            task[t].status = False
                            task[t].pushStreamUtil.close_push_stream_sp()
                            task[t].join(120)
                        videoStatus[t] = PushStreamStatus.TIMEOUT.value[0]
                    videoInfo_timeout = [{"id": k, "status": v} for k, v in videoStatus.items()]
                    put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.TIMEOUT.value[0],
                                                             ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
                                                             ExceptionType.TASK_EXCUTE_TIMEOUT.value[1],
                                                             videoInfo_timeout))
                    break
                # Check for a stop command from the parent.
                event_result = get_no_block_queue(self.event_queue)
                if event_result is not None:
                    command = event_result.get("command")
                    videoIds = event_result.get("videoIds")
                    if "stop" == command:
                        # Empty videoIds means: stop every forwarding task.
                        if videoIds is None or len(videoIds) == 0:
                            logger.info("停止所有执行的推流任务, requestId:{}", requestId)
                            for t in list(task.keys()):
                                if task[t].is_alive():
                                    task[t].status = False
                                    task[t].pushStreamUtil.close_push_stream_sp()
                                    task[t].join(120)
                                videoStatus[t] = PushStreamStatus.SUCCESS.value[0]
                            videoInfo_sucess = [{"id": k, "status": v} for k, v in videoStatus.items()]
                            put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.SUCCESS.value[0], "", "",
                                                                     videoInfo_sucess))
                            break
                        else:
                            # Stop only the listed videos; exit once none remain alive.
                            logger.info("停止指定的推流任务, requestId:{}", requestId)
                            alive_thread = 0
                            for t in list(task.keys()):
                                if task[t].is_alive():
                                    if t in videoIds:
                                        task[t].status = False
                                        task[t].pushStreamUtil.close_push_stream_sp()
                                        task[t].join(120)
                                        videoStatus[t] = PushStreamStatus.SUCCESS.value[0]
                                    else:
                                        alive_thread += 1
                            if alive_thread == 0:
                                videoInfo_sucess = [{"id": k, "status": v} for k, v in videoStatus.items()]
                                put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.SUCCESS.value[0], "",
                                                                         "", videoInfo_sucess))
                                break
                # Detect threads that died without being asked to stop.
                for t in list(task.keys()):
                    if task[t].status and not task[t].is_alive():
                        videoStatus[t] = PushStreamStatus.FAILED.value[0]
                        logger.error("检测到推流线程异常停止videoId:{}, requestId:{}", t, requestId)
                        # Re-raise the thread's own exception when it recorded one.
                        if task[t].ex:
                            raise task[t].ex
                        raise Exception("检测到推流线程异常停止!")
                    if task[t].is_alive():
                        videoStatus[t] = task[t].excute_status
                # Heartbeat feedback roughly every 10 seconds.
                if count % 10 == 0:
                    videoInfo_hb = [{"id": k, "status": v} for k, v in videoStatus.items()]
                    put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.RUNNING.value[0], "", "",
                                                             videoInfo_hb))
                    count = 0
                count += 1
                time.sleep(1)
        except ServiceException as s:
            ex = s.code, s.msg
            logger.error("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, requestId)
        except Exception:
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
            logger.error("服务异常: {}, requestId: {},", format_exc(), requestId)
        finally:
            # On error: stop every thread, mark all FAILED, send FAILED feedback.
            if ex:
                errorCode, errorMsg = ex
                for t in list(task.keys()):
                    if task[t].is_alive():
                        task[t].status = False
                        task[t].pushStreamUtil.close_push_stream_sp()
                        task[t].join(120)
                    videoStatus[t] = PushStreamStatus.FAILED.value[0]
                videoInfo_ex = [{"id": k, "status": v} for k, v in videoStatus.items()]
                put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.FAILED.value[0], errorCode, errorMsg,
                                                         videoInfo_ex))
            # Belt-and-braces: make sure no forwarding thread outlives this process.
            for t in list(task.keys()):
                if task[t].is_alive():
                    task[t].status = False
                    task[t].pushStreamUtil.close_push_stream_sp()
                    task[t].join(120)
            logger.info("推流任务完成, requestId: {}", requestId)

View File

@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
from threading import Thread
import time
from traceback import format_exc
from loguru import logger
from enums.StatusEnum import PushStreamStatus
from exception.CustomerException import ServiceException
from util.PushStreamUtils import PushStreamUtil
class PushSteamThread(Thread):
    """Forwards one stream: pulls from pullUrl and republishes to pushUrl via
    an ffmpeg subprocess, retrying every 5 seconds when the stream drops,
    until the parent flips ``status`` to False.
    """
    # 'excute_status' added: it is assigned in __init__ (and read by the parent
    # process) but was missing from __slots__.
    __slots__ = ("pushStreamUtil", "requestId", "videoId", "status", "excute_status", "ex")

    def __init__(self, pullUrl, pushUrl, requestId, videoId):
        super().__init__()
        self.pushStreamUtil = PushStreamUtil(pullUrl, pushUrl, requestId)
        self.requestId = requestId
        self.videoId = videoId
        # True means "keep running"; the parent sets False to request a stop.
        self.status = True
        self.excute_status = PushStreamStatus.WAITING.value[0]
        # Terminating exception, recorded so the parent can re-raise it.
        self.ex = None

    def run(self):
        logger.info("开始启动推流线程, 视频id: {}, requestId:{}", self.videoId, self.requestId)
        while True:
            try:
                self.pushStreamUtil.start_push_stream()
                self.excute_status = PushStreamStatus.RUNNING.value[0]
                # Blocks until the push subprocess exits.
                out, err = self.pushStreamUtil.push_stream_sp.communicate()
                # Stream dropped while we were still supposed to run: retry.
                if self.status:
                    logger.warning("推流异常,请检测拉流地址和推流地址是否正常!")
                    if self.pushStreamUtil.push_stream_sp.returncode != 0:
                        logger.error("推流异常:{}, 视频id: {}, requestId:{}", err.decode(), self.videoId,
                                     self.requestId)
                    self.excute_status = PushStreamStatus.RETRYING.value[0]
                    self.pushStreamUtil.close_push_stream_sp()
                    time.sleep(5)
                # Manual stop requested by the parent.
                if not self.status:
                    self.pushStreamUtil.close_push_stream_sp()
                    break
            except ServiceException as s:
                logger.error("异常: {}, 视频id: {}, requestId:{}", s.msg, self.videoId, self.requestId)
                self.pushStreamUtil.close_push_stream_sp()
                self.ex = s
                break
            except Exception as e:
                logger.error("异常:{}, 视频id: {}, requestId:{}", format_exc(), self.videoId, self.requestId)
                self.pushStreamUtil.close_push_stream_sp()
                self.ex = e
                break
        logger.info("结束推流线程, 视频id: {}, requestId:{}", self.videoId, self.requestId)

View File

@ -0,0 +1,185 @@
# -*- coding: utf-8 -*-
from queue import Queue
from threading import Thread
from time import time, sleep
from traceback import format_exc
from loguru import logger
from enums.ExceptionEnum import ExceptionType
from enums.RecordingStatusEnum import RecordingStatus
from exception.CustomerException import ServiceException
from util.Cv2Utils import check_video_stream, clear_pull_p, build_video_info2, pull_read_video_stream2
from util.QueUtil import put_queue, get_no_block_queue, clear_queue, put_queue_result
class PullStreamThread(Thread):
    """Base class for pull-stream worker threads.

    Holds the shared queues/context and exposes sendEvent() so the owner can
    post control commands (e.g. stop) onto the private command queue.
    """
    # '_frame_num' added: it is assigned in __init__ but was missing from
    # __slots__ (it silently landed in Thread's instance __dict__ before).
    __slots__ = ('_command', '_pull_queue', '_hb_queue', '_fb_queue', '_msg', '_context', '_frame_num')

    def __init__(self, *args):
        """args: (msg, context, pull_queue, hb_queue, fb_queue, frame_num)."""
        super().__init__()
        self._msg, self._context, self._pull_queue, self._hb_queue, self._fb_queue, self._frame_num = args
        self._command = Queue()

    def sendEvent(self, result):
        """Post a control event; drops it silently if the queue is full for 10s."""
        put_queue(self._command, result, timeout=10, is_ex=False)
class RecordingPullStreamThread(PullStreamThread):
    """Pull-stream thread for recording tasks.

    Reads frames from an offline (http) or live source, batches them into
    (frames, indices, total) tuples on the pull queue, reports status on the
    heartbeat queue, and retries/raises on the various timeout conditions.
    Pull-queue message tags: (4, batch) frames, (1, code, msg) error, (2,) done.
    """
    def run(self):
        msg, context, frame_num = self._msg, self._context, self._frame_num
        request_id, pull_url = msg["request_id"], msg['pull_url']
        service = context["service"]
        pull_stream_timeout = int(service["recording_pull_stream_timeout"])
        read_stream_timeout = int(service["cv2_read_stream_timeout"])
        service_timeout = int(service["timeout"])
        command_queue, pull_queue, fb_queue, hb_queue = self._command, self._pull_queue, self._fb_queue, self._hb_queue
        # Video geometry stays None until build_video_info2 succeeds.
        width, height, width_height_3, all_frames, w, h = None, None, None, 0, None, None
        read_start_time, pull_p, ex = None, None, None
        frame_list, frame_index_list = [], []
        # stop_ex=False means the owner already knows we are stopping (stop_ex
        # command) and no completion/error message should be posted in finally.
        stop_ex = True
        pull_stream_start_time = time()
        try:
            logger.info("录屏拉流线程开始启动, requestId: {}", request_id)
            cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1
            start_time = time()
            while True:
                # Check overall task timeout.
                if time() - start_time > service_timeout:
                    logger.error("录屏拉流超时, requestId: {}", request_id)
                    raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
                                           ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
                # Handle control commands.
                event = get_no_block_queue(command_queue)
                if event is not None:
                    # 'stop': no more frames will be consumed — flush and exit.
                    if 'stop' == event.get("command"):
                        if len(frame_list) > 0:
                            put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                        logger.info("录屏拉流线程开始停止, requestId: {}", request_id)
                        break
                    # 'stop_ex': the parent hit an error — exit without reporting.
                    if 'stop_ex' == event.get("command"):
                        logger.info("录屏异常拉开始停止拉流线程, requestId: {}", request_id)
                        stop_ex = False
                        break
                # Offline (http) source branch.
                if pull_url.startswith('http'):
                    if check_video_stream(width, height):
                        logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
                        # Offline source: keep retrying for up to 5 minutes
                        # (previously a fixed 3-attempt limit), then fail.
                        # if cv2_init_num > 3:
                        if time() - start_time > 300:
                            logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id)
                            raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
                                                   ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
                        cv2_init_num += 1
                        width, height, width_height_3, all_frames, w, h = build_video_info2(pull_url, request_id)
                        if width is not None:
                            put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RUNNING.value[0]}, timeout=2)
                        else:
                            # if cv2_init_num < 2:
                            if time() - start_time < 300:
                                put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RETRYING.value[0]}, timeout=2)
                            continue
                    # Offline source and the queue is full: back off for 1 second.
                    if pull_queue.full():
                        logger.info("pull拉流队列满了: {}, requestId: {}", pull_queue.qsize(), request_id)
                        sleep(1)
                        continue
                # Live source branch.
                else:
                    if check_video_stream(width, height):
                        logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
                        pull_stream_init_timeout = time() - pull_stream_start_time
                        # Flush buffered frames before a possible timeout raise.
                        if len(frame_list) > 0:
                            put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                            frame_list, frame_index_list = [], []
                        if pull_stream_init_timeout > pull_stream_timeout:
                            logger.error("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id)
                            raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
                                                   ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
                        cv2_init_num += 1
                        width, height, width_height_3, all_frames, w, h = build_video_info2(pull_url, request_id)
                        if width is not None:
                            put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RUNNING.value[0]}, timeout=1)
                        else:
                            if cv2_init_num < 3:
                                put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RETRYING.value[0]}, timeout=1)
                            sleep(1)
                            continue
                        # Stream info acquired: reset the pull-timeout clock.
                        pull_stream_start_time = time()
                        cv2_init_num = 1
                frame, pull_p, width, height = pull_read_video_stream2(pull_p, pull_url, width, height,
                                                                       width_height_3, w, h, request_id)
                if frame is None:
                    if pull_url.startswith('http'):
                        # Offline source returning no frame means end-of-file.
                        clear_pull_p(pull_p, request_id)
                        logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id)
                        if len(frame_list) > 0:
                            put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                        # Significantly short of the expected frame count -> treat as error.
                        if concurrent_frame < all_frames - 100:
                            logger.info("离线拉流异常结束requestId: {}", request_id)
                            raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
                                                   ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
                        logger.info("离线拉流线程结束, requestId: {}", request_id)
                        break
                    else:
                        # Live source dropped mid-stream: flush, then retry until
                        # the read timeout elapses.
                        logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id)
                        if len(frame_list) > 0:
                            put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                            frame_list, frame_index_list = [], []
                        if read_start_time is None:
                            read_start_time = time()
                        pull_stream_read_timeout = time() - read_start_time
                        if pull_stream_read_timeout > read_stream_timeout:
                            logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
                                        request_id)
                            raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
                                                   ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
                        init_pull_num += 1
                        continue
                init_pull_num = 1
                read_start_time = None
                if pull_queue.full():
                    sleep(1)
                    logger.info("pull拉流队列满了:{}, requestId: {}", pull_queue.qsize(), request_id)
                    continue
                frame_list.append(frame)
                frame_index_list.append(concurrent_frame)
                # Ship frames downstream in batches of frame_num.
                if len(frame_list) >= frame_num:
                    put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                    frame_list, frame_index_list = [], []
                concurrent_frame += 1
                del frame
        except ServiceException as s:
            ex = s.code, s.msg
        except Exception:
            logger.exception("实时拉流异常: {}, requestId:{}", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            clear_pull_p(pull_p, request_id)
            if stop_ex:
                # Tell the consumer how we ended: (1, code, msg) error or (2,) done.
                if ex:
                    error_code, error_msg = ex
                    result = put_queue_result(pull_queue, (1, error_code, error_msg), timeout=3)
                else:
                    result = put_queue_result(pull_queue, (2,), timeout=3)
                if result:
                    # Wait up to 3 minutes for the owner to acknowledge with 'stop'
                    # before clearing the queues.
                    cr_time = time()
                    while time() - cr_time < 180:
                        event = get_no_block_queue(command_queue)
                        if event is not None:
                            # 'stop' means no more frames will be consumed — exit.
                            if 'stop' == event.get("command"):
                                logger.info("录屏拉流线程开始停止, requestId: {}", request_id)
                                break
                        sleep(1)
            clear_queue(command_queue)
            clear_queue(pull_queue)
            clear_queue(hb_queue)
            del frame_list, frame_index_list
            logger.info("录屏拉流线程结束, requestId: {}", request_id)

View File

@ -0,0 +1,331 @@
# -*- coding: utf-8 -*-
import os
from multiprocessing import Process, Queue
from os import getpid
from time import time, sleep
from traceback import format_exc
import psutil
from loguru import logger
from util.LogUtils import init_log
from concurrency.FileUploadThread import ImageFileUpload
from entity.FeedBack import message_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.Cv2Utils import check_video_stream, build_video_info, pull_read_video_stream, clear_pull_p
from util.QueUtil import get_no_block_queue, put_queue, clear_queue, put_queue_result
class PullVideoStreamProcess(Process):
    """Base process for pulling a video stream and fanning frames out to the
    analysis pipeline, with a companion image-upload thread.
    """
    __slots__ = ("_command_queue", "_msg", "_context", "_fb_queue", "_pull_queue", "_image_queue", "_analyse_type",
                 "_frame_num")

    def __init__(self, *args):
        """args: (msg, context, fb_queue, pull_queue, image_queue, analyse_type, frame_num)."""
        super().__init__()
        # Owned by this process: control commands arrive here via sendCommand().
        self._command_queue = Queue()
        # Injected collaborators and task parameters.
        self._msg, self._context, self._fb_queue, self._pull_queue, self._image_queue, self._analyse_type, \
            self._frame_num = args

    def sendCommand(self, result):
        """Post a control command; raises if the queue stays full for 2 seconds."""
        put_queue(self._command_queue, result, timeout=2, is_ex=True)

    @staticmethod
    def start_File_upload(fb_queue, context, msg, image_queue, analyse_type):
        """Start the daemon image-upload thread and return it."""
        image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
        # Thread.setDaemon() is deprecated since Python 3.10; assign the
        # attribute directly instead (same behavior).
        image_thread.daemon = True
        image_thread.start()
        return image_thread

    @staticmethod
    def check(start_time, service_timeout, request_id, image_thread):
        """Raise if the task exceeded its timeout or the upload thread died."""
        if time() - start_time > service_timeout:
            logger.error("拉流进程运行超时, requestId: {}", request_id)
            raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
                                   ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
        # The upload thread must stay alive for the whole task.
        if image_thread and not image_thread.is_alive():
            logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, requestId:{}", request_id)
            raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")
class OnlinePullVideoStreamProcess(PullVideoStreamProcess):
    """Pull-stream process for live (online) sources.

    Reads frames, batches them onto the pull queue as (4, (frames, indices,
    total)), reports (1, code, msg) on error or (2,) on normal end, and also
    detects orphaning (parent process death) to self-terminate cleanly.
    """
    __slots__ = ()
    def run(self):
        # Bind hot-loop values to locals up front to avoid repeated attribute
        # lookups inside the loop.
        context, msg, analyse_type, frame_num = self._context, self._msg, self._analyse_type, self._frame_num
        base_dir, env, service = context['base_dir'], context['env'], context["service"]
        request_id, pull_url = msg["request_id"], msg["pull_url"]
        pull_stream_timeout, read_stream_timeout, service_timeout = int(service["cv2_pull_stream_timeout"]), \
            int(service["cv2_read_stream_timeout"]), int(service["timeout"]) + 120
        command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \
            self._fb_queue
        image_thread, ex = None, None
        # Video geometry stays None until build_video_info succeeds.
        width, height, width_height_3, all_frames, w_2, h_2, pull_p = None, None, None, 0, None, None, None
        frame_list, frame_index_list = [], []
        # ex_status=False means the owner already knows we are stopping and no
        # completion/error message should be posted in finally.
        ex_status = True
        try:
            # Initialise logging for this child process.
            init_log(base_dir, env)
            logger.info("开启启动实时视频拉流进程, requestId:{},pid:{},ppid:{}", request_id,os.getpid(),os.getppid())
            # Start the companion image-upload thread.
            image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
            cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1
            start_time, pull_stream_start_time, read_start_time, full_timeout = time(), None, None, None
            while True:
                # Fail fast if the task timed out or the upload thread died.
                self.check(start_time, service_timeout, request_id, image_thread)
                command_msg = get_no_block_queue(command_queue)
                if command_msg is not None:
                    if 'stop' == command_msg.get("command"):
                        logger.info("开始停止实时拉流进程, requestId:{}", request_id)
                        break
                    # 'stop_ex': parent hit an error — exit without reporting.
                    if 'stop_ex' == command_msg.get("command"):
                        logger.info("开始停止实时拉流进程, requestId:{}", request_id)
                        ex_status = False
                        break
                # (Re)acquire the stream info when it is missing.
                if check_video_stream(width, height):
                    # Flush buffered frames before a possible timeout raise.
                    if len(frame_list) > 0:
                        put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                        frame_list, frame_index_list = [], []
                    logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
                    if pull_stream_start_time is None:
                        pull_stream_start_time = time()
                    pull_stream_init_timeout = time() - pull_stream_start_time
                    if pull_stream_init_timeout > pull_stream_timeout:
                        logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id)
                        # Timed out: propagate to the parent via the exception path.
                        raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
                    cv2_init_num += 1
                    width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
                    if width is None:
                        sleep(1)
                        continue
                    # Stream info acquired: reset the pull-timeout clock.
                    pull_stream_start_time, cv2_init_num = None, 1
                frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3,
                                                                      w_2, h_2, request_id)
                if pull_queue.full():
                    logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
                    if full_timeout is None:
                        full_timeout = time()
                    # Consumer stalled for 3 minutes -> internal error.
                    if time() - full_timeout > 180:
                        logger.error("拉流队列阻塞超时, 请检查父进程是否正常requestId: {}", request_id)
                        raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                               ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
                    # ppid == 1 means this process was re-parented to init: the
                    # parent died. Tear everything down and report NO_RESOURCES.
                    if psutil.Process(getpid()).ppid() == 1:
                        clear_pull_p(pull_p, request_id)
                        ex_status = False
                        for q in [command_queue, pull_queue, image_queue]:
                            clear_queue(q)
                        if image_thread and image_thread.is_alive():
                            put_queue(image_queue, (2, "stop"), timeout=1)
                            image_thread.join(120)
                        logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
                        put_queue(self._fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                                   self._analyse_type,
                                                                   ExceptionType.NO_RESOURCES.value[0],
                                                                   ExceptionType.NO_RESOURCES.value[1]), timeout=2)
                        break
                    del frame
                    continue
                full_timeout = None
                if frame is None:
                    # Stream dropped: flush, then retry until the read timeout.
                    clear_pull_p(pull_p, request_id)
                    if len(frame_list) > 0:
                        put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                        frame_list, frame_index_list = [], []
                    logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id)
                    if read_start_time is None:
                        read_start_time = time()
                    pull_stream_read_timeout = time() - read_start_time
                    if pull_stream_read_timeout > read_stream_timeout:
                        logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
                                    request_id)
                        raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
                    init_pull_num += 1
                    sleep(1)
                    continue
                init_pull_num, read_start_time = 1, None
                frame_list.append(frame)
                frame_index_list.append(concurrent_frame)
                # Ship frames downstream in batches of frame_num.
                if len(frame_list) >= frame_num:
                    put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
                    frame_list, frame_index_list = [], []
                concurrent_frame += 1
                del frame
        except ServiceException as s:
            logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", format_exc(), pull_queue.qsize(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            clear_pull_p(pull_p, request_id)
            del frame_list, frame_index_list
            if ex_status:
                # Tell the consumer how we ended: (1, code, msg) error or (2,) done.
                if ex:
                    code, msg = ex
                    r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
                else:
                    r = put_queue_result(pull_queue, (2,), timeout=10)
                if r:
                    # Wait up to 60s for the owner to acknowledge with 'stop',
                    # stopping the upload thread when it does.
                    c_time = time()
                    while time() - c_time < 60:
                        command_msg = get_no_block_queue(command_queue)
                        if command_msg is not None:
                            if 'stop' == command_msg.get("command"):
                                logger.info("开始停止实时拉流进程, requestId:{}", request_id)
                                if image_thread and image_thread.is_alive():
                                    put_queue(image_queue, (2, "stop"), timeout=1)
                                    logger.info("停止图片上传线程, requestId:{}", request_id)
                                    image_thread.join(120)
                                    logger.info("停止图片上传线程结束, requestId:{}", request_id)
                                break
            # Final cleanup: drain queues and make sure the upload thread is gone.
            for q in [command_queue, pull_queue, image_queue]:
                clear_queue(q)
            if image_thread and image_thread.is_alive():
                put_queue(image_queue, (2, "stop"), timeout=1)
                logger.info("停止图片上传线程, requestId:{}", request_id)
                image_thread.join(120)
                logger.info("停止图片上传线程结束, requestId:{}", request_id)
            logger.info("实时拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
                        image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)
class OfflinePullVideoStreamProcess(PullVideoStreamProcess):
    """Pull-stream worker process for OFFLINE (fixed-length / VOD) analysis.

    Reads frames from the source at ``msg["pull_url"]``, batches them
    ``frame_num`` at a time onto ``pull_queue`` for the analysis side, and
    runs an image-upload thread alongside.  Terminates when the source is
    exhausted, on a ``stop``/``stop_ex`` command, on timeout, or when the
    parent process dies (ppid becomes 1).
    """
    __slots__ = ()
    def run(self):
        # Bind frequently-used attributes to locals once to avoid repeated
        # attribute lookups inside the hot loop.
        msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type
        request_id, base_dir, env, pull_url = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"]
        # ex: (code, msg) of a failure; service timeout padded by 120s;
        # full_timeout marks when pull_queue first became full.
        ex, service_timeout, full_timeout = None, int(context["service"]["timeout"]) + 120, None
        command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \
            self._fb_queue
        image_thread, pull_p = None, None
        # Video geometry is unknown until build_video_info succeeds.
        width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None
        frame_list, frame_index_list = [], []
        # ex_status False => controlled shutdown (stop_ex / orphaned process):
        # skip the normal result hand-off in the finally block.
        ex_status = True
        try:
            # Initialise logging for this child process.
            init_log(base_dir, env)
            logger.info("开启离线视频拉流进程, requestId:{}", request_id)
            # Start the image-upload thread.
            image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
            # Initialise pull-stream bookkeeping counters.
            cv2_init_num, concurrent_frame = 0, 1
            start_time = time()
            while True:
                # Check overall task timeout and that the upload thread is alive.
                self.check(start_time, service_timeout, request_id, image_thread)
                command_msg = get_no_block_queue(command_queue)
                if command_msg is not None:
                    if 'stop' == command_msg.get("command"):
                        logger.info("开始停止离线拉流进程, requestId:{}", request_id)
                        break
                    if 'stop_ex' == command_msg.get("command"):
                        logger.info("开始停止离线拉流进程, requestId:{}", request_id)
                        ex_status = False
                        break
                # (Re)acquire the video metadata if it is not known yet.
                if check_video_stream(width, height):
                    logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
                    # Give up after 3 retries: the offline address is unusable.
                    if cv2_init_num > 3:
                        logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id)
                        raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
                                               ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
                    cv2_init_num += 1
                    sleep(1)
                    width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
                    continue
                if pull_queue.full():
                    logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
                    if full_timeout is None:
                        full_timeout = time()
                    # Queue full for over 3 minutes: consumer is stuck.
                    if time() - full_timeout > 180:
                        logger.error("pull队列阻塞超时请检测父进程是否正常requestId: {}", request_id)
                        raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                               ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
                    # ppid == 1 means the parent died and init adopted us: clean up and bail out.
                    if psutil.Process(getpid()).ppid() == 1:
                        clear_pull_p(pull_p, request_id)
                        ex_status = False
                        for q in [command_queue, pull_queue, image_queue]:
                            clear_queue(q)
                        put_queue(image_queue, (2, "stop"), timeout=1)
                        image_thread.join(120)
                        logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
                        put_queue(self._fb_queue, message_feedback(request_id,
                                                                   AnalysisStatus.FAILED.value,
                                                                   self._analyse_type,
                                                                   ExceptionType.NO_RESOURCES.value[0],
                                                                   ExceptionType.NO_RESOURCES.value[1]), timeout=2)
                        break
                    continue
                full_timeout = None
                frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height,
                                                                      width_height_3, w_2, h_2, request_id)
                # frame None for an offline source normally means end of file.
                if frame is None:
                    logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id)
                    clear_pull_p(pull_p, request_id)
                    # Flush the partially-filled batch before finishing.
                    if len(frame_list) > 0:
                        put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                    # Allow a tolerance of 100 frames before declaring an abnormal end.
                    if concurrent_frame < all_frames - 100:
                        logger.info("离线拉流异常结束requestId: {}", request_id)
                        raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
                    logger.info("离线拉流线程结束, requestId: {}", request_id)
                    break
                frame_list.append(frame)
                frame_index_list.append(concurrent_frame)
                # Ship a full batch of frame_num frames to the analysis side.
                if len(frame_list) >= frame_num:
                    put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
                    frame_list, frame_index_list = [], []
                concurrent_frame += 1
                del frame
        except ServiceException as s:
            logger.error("离线拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("离线拉流异常: {}, requestId:{}", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            clear_pull_p(pull_p, request_id)
            del frame_list, frame_index_list
            if ex_status:
                # Hand the result (error (1, code, msg) or normal end (2,)) to the consumer,
                # then wait up to 180s for the parent's 'stop' acknowledgement.
                if ex:
                    code, msg = ex
                    r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
                else:
                    r = put_queue_result(pull_queue, (2,), timeout=10)
                if r:
                    c_time = time()
                    while time() - c_time < 180:
                        command_msg = get_no_block_queue(command_queue)
                        if command_msg is not None:
                            if 'stop' == command_msg.get("command"):
                                logger.info("开始停止实时拉流进程, requestId:{}", request_id)
                                if image_thread and image_thread.is_alive():
                                    put_queue(image_queue, (2, "stop"), timeout=1)
                                    logger.info("停止图片上传线程, requestId:{}", request_id)
                                    image_thread.join(120)
                                    logger.info("停止图片上传线程结束, requestId:{}", request_id)
                                break
            # Drain every queue and make sure the upload thread is stopped.
            for q in [command_queue, pull_queue, image_queue]:
                clear_queue(q)
            if image_thread and image_thread.is_alive():
                put_queue(image_queue, (2, "stop"), timeout=1)
                logger.info("停止图片上传线程, requestId:{}", request_id)
                image_thread.join(120)
                logger.info("停止图片上传线程结束, requestId:{}", request_id)
            logger.info("离线拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
                        image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)

View File

@ -0,0 +1,349 @@
# -*- coding: utf-8 -*-
import os
from multiprocessing import Process, Queue
from os import getpid
from time import time, sleep
from traceback import format_exc
import psutil
from loguru import logger
from util.LogUtils import init_log
from concurrency.FileUploadThread import ImageFileUpload
from entity.FeedBack import message_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.Cv2Utils import check_video_stream, build_video_info, pull_read_video_stream, clear_pull_p
from util.QueUtil import get_no_block_queue, put_queue, clear_queue, put_queue_result
class PullVideoStreamProcess2(Process):
    """Base class for the v2 pull-stream worker processes.

    Holds the shared queues/configuration handed in by the parent and
    provides the helpers the online/offline subclasses use: sending
    control commands, starting the image-upload thread, and watchdog
    checks on timeout / upload-thread health.
    """
    __slots__ = ("_command_queue", "_msg", "_context", "_fb_queue", "_pull_queue", "_image_queue", "_analyse_type",
                 "_frame_num")
    def __init__(self, *args):
        super().__init__()
        # Owned by this process: channel the parent uses to control it.
        self._command_queue = Queue()
        # Injected collaborators / configuration.
        self._msg, self._context, self._fb_queue, self._pull_queue, self._image_queue, self._analyse_type, \
            self._frame_num = args
    def sendCommand(self, result):
        """Post a control message to this process.

        Raises ServiceException if the command queue stays full for 10s.
        """
        try:
            self._command_queue.put(result, timeout=10)
        except Exception:
            logger.error("添加队列超时异常:{}, requestId:{}", format_exc(), self._msg.get("request_id"))
            raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                   ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
    @staticmethod
    def start_File_upload(*args):
        """Create, daemonise and start the image-upload thread; return it."""
        fb_queue, context, msg, image_queue, analyse_type = args
        image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
        # Thread.setDaemon() is deprecated since Python 3.10; set the attribute.
        image_thread.daemon = True
        image_thread.start()
        return image_thread
    @staticmethod
    def check(start_time, service_timeout, request_id, image_thread):
        """Watchdog: raise if the task ran past its deadline or the upload thread died."""
        if time() - start_time > service_timeout:
            logger.error("分析超时, requestId: {}", request_id)
            raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
                                   ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
        # Verify the image-upload thread is still running.
        if image_thread is not None and not image_thread.is_alive():
            logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, requestId:{}", request_id)
            raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")
class OnlinePullVideoStreamProcess2(PullVideoStreamProcess2):
    """Pull-stream worker process for ONLINE (live) analysis, v2.

    Continuously pulls frames from a live stream, batches ``frame_num``
    frames onto ``pull_queue`` for the analysis side, and supervises the
    image-upload thread.  Retries both stream initialisation and frame
    reads with configured timeouts; shuts down on stop/stop_ex commands,
    timeouts, or when the parent process dies (ppid becomes 1).
    """
    __slots__ = ()
    def run(self):
        # Bind hot attributes to locals up front to avoid repeated lookups in the loop.
        context, msg, analyse_type = self._context, self._msg, self._analyse_type
        request_id, base_dir, env = msg["request_id"], context['base_dir'], context['env']
        pull_url, frame_num = msg["pull_url"], self._frame_num
        pull_stream_timeout = int(context["service"]["cv2_pull_stream_timeout"])
        read_stream_timeout = int(context["service"]["cv2_read_stream_timeout"])
        service_timeout = int(context["service"]["timeout"])
        command_queue, pull_queue, image_queue = self._command_queue, self._pull_queue, self._image_queue
        fb_queue = self._fb_queue
        image_thread, pull_p = None, None
        # Video geometry is unknown until build_video_info succeeds.
        width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None
        frame_list, frame_index_list = [], []
        # ex: (code, msg) of a failure, reported to the consumer in finally.
        ex = None
        # ex_status False => controlled shutdown: skip the normal hand-off in finally.
        ex_status = True
        # Marks when pull_queue first became full (reset when it drains).
        full_timeout = None
        try:
            # Initialise logging for this child process.
            init_log(base_dir, env)
            logger.info("开启实时视频拉流进程, requestId:{}", request_id)
            # Start the image-upload thread.
            image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
            # Initialise pull-stream bookkeeping counters.
            cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1
            start_time, pull_start_time, read_start_time = time(), None, None
            while True:
                # Check overall task timeout and that the upload thread is alive.
                self.check(start_time, service_timeout, request_id, image_thread)
                command_msg = get_no_block_queue(command_queue)
                if command_msg is not None:
                    if 'stop' == command_msg.get("command"):
                        logger.info("开始停止实时拉流进程, requestId:{}", request_id)
                        break
                    if 'stop_ex' == command_msg.get("command"):
                        logger.info("开始停止实时拉流进程, requestId:{}", request_id)
                        ex_status = False
                        break
                # (Re)acquire the video metadata if it is not known yet.
                if check_video_stream(width, height):
                    # Flush the partial batch first so nothing is lost during re-init.
                    if len(frame_list) > 0:
                        put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                        frame_list, frame_index_list = [], []
                    logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
                    if pull_start_time is None:
                        pull_start_time = time()
                    pull_stream_init_timeout = time() - pull_start_time
                    # Stream never came up within the configured window.
                    if pull_stream_init_timeout > pull_stream_timeout:
                        logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id)
                        raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
                    cv2_init_num += 1
                    width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
                    if width is None:
                        sleep(1)
                    continue
                # Stream is up: reset the init retry bookkeeping.
                pull_start_time, cv2_init_num = None, 1
                frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3,
                                                                      w_2, h_2, request_id)
                # Empty frame on a live stream: the feed stalled; retry until timeout.
                if frame is None:
                    if len(frame_list) > 0:
                        put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                        frame_list, frame_index_list = [], []
                    logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id)
                    if read_start_time is None:
                        read_start_time = time()
                    pull_stream_read_timeout = time() - read_start_time
                    if pull_stream_read_timeout > read_stream_timeout:
                        logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
                                    request_id)
                        raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
                    init_pull_num += 1
                    continue
                # Got a frame: reset the read retry bookkeeping.
                init_pull_num, read_start_time = 1, None
                if pull_queue.full():
                    logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
                    if full_timeout is None:
                        full_timeout = time()
                    # Queue full for over 3 minutes: the consumer is stuck.
                    if time() - full_timeout > 180:
                        logger.error("拉流队列阻塞异常, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                               ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
                    # ppid == 1 means the parent died and init adopted us: clean up and bail out.
                    if psutil.Process(getpid()).ppid() == 1:
                        clear_pull_p(pull_p, request_id)
                        ex_status = False
                        for q in [command_queue, pull_queue, image_queue]:
                            clear_queue(q)
                        if image_thread and image_thread.is_alive():
                            put_queue(image_queue, (2, "stop"), timeout=1)
                            image_thread.join(120)
                        logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
                        put_queue(self._fb_queue, message_feedback(request_id,
                                                                   AnalysisStatus.FAILED.value,
                                                                   self._analyse_type,
                                                                   ExceptionType.NO_RESOURCES.value[0],
                                                                   ExceptionType.NO_RESOURCES.value[1]))
                        break
                    # Drop this frame while the queue is full (live stream: skipping is acceptable).
                    del frame
                    continue
                full_timeout = None
                frame_list.append(frame)
                frame_index_list.append(concurrent_frame)
                # Ship a full batch of frame_num frames to the analysis side.
                if len(frame_list) >= frame_num:
                    put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
                    frame_list, frame_index_list = [], []
                concurrent_frame += 1
                del frame
        except ServiceException as s:
            logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", format_exc(), pull_queue.qsize(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            clear_pull_p(pull_p, request_id)
            del frame_list, frame_index_list
            if ex_status:
                # Hand the result (error (1, code, msg) or normal end (2,)) to the consumer,
                # then wait up to 180s for the parent's 'stop' acknowledgement.
                if ex:
                    code, msg = ex
                    r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
                else:
                    r = put_queue_result(pull_queue, (2,), timeout=10)
                if r:
                    c_time = time()
                    while time() - c_time < 180:
                        command_msg = get_no_block_queue(command_queue)
                        if command_msg is not None:
                            if 'stop' == command_msg.get("command"):
                                logger.info("开始停止实时拉流进程, requestId:{}", request_id)
                                if image_thread and image_thread.is_alive():
                                    put_queue(image_queue, (2, "stop"), timeout=1)
                                    logger.info("停止图片上传线程, requestId:{}", request_id)
                                    image_thread.join(120)
                                    logger.info("停止图片上传线程结束, requestId:{}", request_id)
                                break
            # Drain every queue and make sure the upload thread is stopped.
            for q in [command_queue, pull_queue, image_queue]:
                clear_queue(q)
            if image_thread and image_thread.is_alive():
                put_queue(image_queue, (2, "stop"), timeout=1)
                logger.info("停止图片上传线程, requestId:{}", request_id)
                image_thread.join(120)
                logger.info("停止图片上传线程结束, requestId:{}", request_id)
            logger.info("实时拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
                        image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)
class OfflinePullVideoStreamProcess2(PullVideoStreamProcess2):
    """Pull-stream worker process for OFFLINE (fixed-length / VOD) analysis, v2.

    Reads frames from a fixed-length source, batches ``frame_num`` frames
    onto ``pull_queue`` for the analysis side, and supervises the
    image-upload thread.  Terminates at end of file, on stop/stop_ex
    commands, on timeout, or when the parent process dies.
    """
    __slots__ = ()
    def run(self):
        # Bind hot attributes to locals once to avoid repeated lookups in the loop.
        msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type
        request_id, base_dir, env, pull_url = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"]
        ex, service_timeout = None, int(context["service"]["timeout"])
        command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \
            self._fb_queue
        image_thread, pull_p = None, None
        # Video geometry is unknown until build_video_info succeeds.
        width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None
        frame_list, frame_index_list = [], []
        # ex_status False => controlled shutdown: skip the normal hand-off in finally.
        ex_status = True
        # Marks when pull_queue first became full (reset when it drains).
        full_timeout = None
        try:
            # Initialise logging for this child process.
            init_log(base_dir, env)
            logger.info("开启离线视频拉流进程, requestId:{}", request_id)
            # Start the image-upload thread.
            image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
            # Initialise pull-stream bookkeeping counters.
            cv2_init_num = 0
            concurrent_frame = 1
            start_time = time()
            while True:
                # Check overall task timeout and that the upload thread is alive.
                self.check(start_time, service_timeout, request_id, image_thread)
                command_msg = get_no_block_queue(command_queue)
                if command_msg is not None:
                    if 'stop' == command_msg.get("command"):
                        logger.info("开始停止离线拉流进程, requestId:{}", request_id)
                        break
                    if 'stop_ex' == command_msg.get("command"):
                        logger.info("开始停止离线拉流进程, requestId:{}", request_id)
                        ex_status = False
                        break
                # (Re)acquire the video metadata if it is not known yet.
                if check_video_stream(width, height):
                    # Flush the partial batch first so nothing is lost during re-init.
                    if len(frame_list) > 0:
                        put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
                        frame_list, frame_index_list = [], []
                    logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
                    # Give up after 3 retries: the offline address is unusable.
                    if cv2_init_num > 3:
                        clear_pull_p(pull_p, request_id)
                        logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id)
                        raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
                                               ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
                    cv2_init_num += 1
                    sleep(1)
                    width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
                    continue
                if pull_queue.full():
                    logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
                    if full_timeout is None:
                        full_timeout = time()
                    # Queue full for over 5 minutes: the consumer is stuck.
                    if time() - full_timeout > 300:
                        logger.error("拉流队列阻塞超时, 请检查父进程是否正常requestId: {}", request_id)
                        raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                               ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
                    # ppid == 1 means the parent died and init adopted us: clean up and bail out.
                    if psutil.Process(getpid()).ppid() == 1:
                        clear_pull_p(pull_p, request_id)
                        ex_status = False
                        for q in [command_queue, pull_queue, image_queue]:
                            clear_queue(q)
                        if image_thread and image_thread.is_alive():
                            put_queue(image_queue, (2, "stop"), timeout=1)
                            image_thread.join(120)
                        logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
                        put_queue(self._fb_queue, message_feedback(request_id,
                                                                   AnalysisStatus.FAILED.value,
                                                                   self._analyse_type,
                                                                   ExceptionType.NO_RESOURCES.value[0],
                                                                   ExceptionType.NO_RESOURCES.value[1]))
                        break
                    continue
                full_timeout = None
                frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3,
                                                                      w_2, h_2, request_id)
                # frame None for an offline source normally means end of file.
                if frame is None:
                    logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id)
                    clear_pull_p(pull_p, request_id)
                    # Flush the partially-filled batch before finishing.
                    if len(frame_list) > 0:
                        put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=2, is_ex=False)
                        frame_list, frame_index_list = [], []
                    # Allow a tolerance of 100 frames before declaring an abnormal end.
                    if concurrent_frame < all_frames - 100:
                        logger.info("离线拉流异常结束requestId: {}", request_id)
                        raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
                    logger.info("离线拉流线程结束, requestId: {}", request_id)
                    break
                frame_list.append(frame)
                frame_index_list.append(concurrent_frame)
                # Ship a full batch of frame_num frames to the analysis side.
                if len(frame_list) >= frame_num:
                    put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
                    frame_list, frame_index_list = [], []
                concurrent_frame += 1
                del frame
        except ServiceException as s:
            logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("实时拉流异常: {}, requestId:{}", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            clear_pull_p(pull_p, request_id)
            del frame_list, frame_index_list
            if ex_status:
                # Hand the result (error (1, code, msg) or normal end (2,)) to the consumer,
                # then wait up to 180s for the parent's 'stop' acknowledgement.
                if ex:
                    code, msg = ex
                    r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
                else:
                    r = put_queue_result(pull_queue, (2,), timeout=10)
                if r:
                    c_time = time()
                    while time() - c_time < 180:
                        command_msg = get_no_block_queue(command_queue)
                        if command_msg is not None:
                            if 'stop' == command_msg.get("command"):
                                logger.info("开始停止离线拉流进程, requestId:{}", request_id)
                                if image_thread and image_thread.is_alive():
                                    put_queue(image_queue, (2, "stop"), timeout=1)
                                    logger.info("停止图片上传线程, requestId:{}", request_id)
                                    image_thread.join(120)
                                    logger.info("停止图片上传线程结束, requestId:{}", request_id)
                                break
            # Drain every queue and make sure the upload thread is stopped.
            for q in [command_queue, pull_queue, image_queue]:
                clear_queue(q)
            if image_thread and image_thread.is_alive():
                put_queue(image_queue, (2, "stop"), timeout=1)
                logger.info("停止图片上传线程, requestId:{}", request_id)
                image_thread.join(120)
                logger.info("停止图片上传线程结束, requestId:{}", request_id)
            logger.info("离线拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
                        image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)

View File

@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from os.path import join
from threading import Thread
from traceback import format_exc
import cv2
import numpy as np
from loguru import logger
from util.Cv2Utils import write_or_video, write_ai_video, push_video_stream, close_all_p, video_conjuncing
from util.ImageUtils import url2Array, add_water_pic
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue
class OnPushStreamThread(Thread):
    """Push-stream thread for ONLINE analysis.

    Consumes messages from ``push_queue``:
      * ``(1, (frame, current_frame, all_frames, ques_list))`` — draw the
        detections on a copy of the frame, write the original and the
        annotated video locally, push the merged frame to ``push_url`` and
        forward annotated frames to the image-upload queue;
      * ``(2, 'stop')`` — close every writer/stream and exit.

    Any exception is stored on ``self.ex`` for the owner to inspect.
    """
    __slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')
    def __init__(self, *args):
        super().__init__()
        # Injected collaborators.
        self._msg, self._push_queue, self._image_queue, self._context = args
        # Set when run() dies with an exception.
        self.ex = None
        self._logo = None
        if self._context["video"]["video_add_water"]:
            self._logo = self._msg.get("logo_url")
            if self._logo:
                self._logo = url2Array(self._logo, enable_ex=False)
            # BUG FIX: after url2Array the logo is a numpy array and
            # `not array` raises "truth value of an array is ambiguous".
            # Compare against None instead (same as PushStreamProcess.build_logo_url).
            if self._logo is None:
                self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
    def run(self):
        request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
        orFilePath, aiFilePath, logo = self._context.get("orFilePath"), self._context.get("aiFilePath"), self._logo
        or_video_file, ai_video_file, push_p = None, None, None
        push_url = self._msg.get("push_url")
        try:
            logger.info("开始启动推流线程requestId:{}", request_id)
            with ThreadPoolExecutor(max_workers=2) as t:
                p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
                while True:
                    push_parm = push_queue.get()
                    if push_parm is not None:
                        # Message shapes:
                        #   (1, (frame, current_frame, all_frames, ques_list)) - frame to render
                        #   (2, command)                                       - control message
                        if push_parm[0] == 1:  # handle one video frame
                            frame, current_frame, all_frames, ques_list = push_parm[1]
                            copy_frame = frame.copy()
                            det_xywh = {}
                            if len(ques_list) > 0:
                                for qs in ques_list:
                                    det_xywh[qs[1]] = {}
                                    detect_targets_code = int(qs[0][0])
                                    score = qs[0][-1]
                                    label_array = qs[3][detect_targets_code]
                                    color = qs[4][detect_targets_code]
                                    # Accept either xyxy scalars or an already-built polygon box.
                                    if not isinstance(qs[0][1], (list, tuple, np.ndarray)):
                                        xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4])
                                        box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
                                    else:
                                        box = qs[0][1]
                                    draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop")
                                    cd = det_xywh[qs[1]].get(detect_targets_code)
                                    if cd is None:
                                        det_xywh[qs[1]][detect_targets_code] = [
                                            [detect_targets_code, box, score, label_array, color]]
                                    else:
                                        det_xywh[qs[1]][detect_targets_code].append(
                                            [detect_targets_code, box, score, label_array, color])
                            # BUG FIX: `if logo:` on a numpy array raises ValueError.
                            if logo is not None:
                                frame = add_water_pic(frame, logo, request_id)
                                copy_frame = add_water_pic(copy_frame, logo, request_id)
                            frame_merge = video_conjuncing(frame, copy_frame)
                            # Write the original video locally.
                            write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
                                                             or_write_status, request_id)
                            # Write the annotated video locally.
                            write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
                                                             ai_write_status, request_id)
                            if len(det_xywh) > 0:
                                put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames)))
                            push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
                            ai_video_file = write_ai_video_result.result()
                            or_video_file = write_or_video_result.result()
                        if push_parm[0] == 2:
                            if 'stop' == push_parm[1]:
                                logger.info("停止推流线程, requestId: {}", request_id)
                                close_all_p(push_p, or_video_file, ai_video_file, request_id)
                                or_video_file, ai_video_file, push_p = None, None, None
                                break
        except Exception as e:
            logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
            self.ex = e
        finally:
            # Idempotent: close_all_p tolerates already-closed handles (all None after a clean stop).
            close_all_p(push_p, or_video_file, ai_video_file, request_id)
            logger.info("推流线程停止完成requestId:{}", request_id)
class OffPushStreamThread(Thread):
    """Push-stream thread for OFFLINE analysis.

    Same pipeline as OnPushStreamThread, but the original (un-annotated)
    video is not written locally — only the merged/annotated video is.
    Any exception is stored on ``self.ex`` for the owner to inspect.
    """
    __slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')
    def __init__(self, *args):
        super().__init__()
        # Injected collaborators.
        self._msg, self._push_queue, self._image_queue, self._context = args
        # Set when run() dies with an exception.
        self.ex = None
        self._logo = None
        if self._context["video"]["video_add_water"]:
            self._logo = self._msg.get("logo_url")
            if self._logo:
                self._logo = url2Array(self._logo, enable_ex=False)
            # BUG FIX: after url2Array the logo is a numpy array and
            # `not array` raises "truth value of an array is ambiguous".
            # Compare against None instead (same as PushStreamProcess.build_logo_url).
            if self._logo is None:
                self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
    def run(self):
        request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
        aiFilePath, logo = self._context.get("aiFilePath"), self._logo
        ai_video_file, push_p = None, None
        push_url = self._msg.get("push_url")
        try:
            logger.info("开始启动推流线程requestId:{}", request_id)
            with ThreadPoolExecutor(max_workers=1) as t:
                p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
                while True:
                    push_parm = push_queue.get()
                    if push_parm is not None:
                        # Message shapes:
                        #   (1, (frame, current_frame, all_frames, ques_list)) - frame to render
                        #   (2, command)                                       - control message
                        if push_parm[0] == 1:  # handle one video frame
                            frame, current_frame, all_frames, ques_list = push_parm[1]
                            copy_frame = frame.copy()
                            det_xywh = {}
                            if len(ques_list) > 0:
                                for qs in ques_list:
                                    det_xywh[qs[1]] = {}
                                    detect_targets_code = int(qs[0][0])
                                    score = qs[0][-1]
                                    label_array = qs[3][detect_targets_code]
                                    color = qs[4][detect_targets_code]
                                    # Accept either xyxy scalars or an already-built polygon box.
                                    if not isinstance(qs[0][1], (list, tuple, np.ndarray)):
                                        xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4])
                                        box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
                                    else:
                                        box = qs[0][1]
                                    draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop")
                                    cd = det_xywh[qs[1]].get(detect_targets_code)
                                    if cd is None:
                                        det_xywh[qs[1]][detect_targets_code] = [
                                            [detect_targets_code, box, score, label_array, color]]
                                    else:
                                        det_xywh[qs[1]][detect_targets_code].append(
                                            [detect_targets_code, box, score, label_array, color])
                            # BUG FIX: `if logo:` on a numpy array raises ValueError.
                            if logo is not None:
                                frame = add_water_pic(frame, logo, request_id)
                                copy_frame = add_water_pic(copy_frame, logo, request_id)
                            frame_merge = video_conjuncing(frame, copy_frame)
                            # Write the annotated video locally (offline: no original video is kept).
                            write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
                                                             ai_write_status, request_id)
                            if len(det_xywh) > 0:
                                put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames)))
                            push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
                            ai_video_file = write_ai_video_result.result()
                        if push_parm[0] == 2:
                            if 'stop' == push_parm[1]:
                                logger.info("停止推流线程, requestId: {}", request_id)
                                close_all_p(push_p, None, ai_video_file, request_id)
                                ai_video_file, push_p = None, None
                                break
        except Exception as e:
            logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
            self.ex = e
        finally:
            # Idempotent: handles are None after a clean stop.
            close_all_p(push_p, None, ai_video_file, request_id)
            logger.info("推流线程停止完成requestId:{}", request_id)

View File

@ -0,0 +1,201 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
from os.path import join
from threading import Thread
from traceback import format_exc
import cv2
import numpy as np
from loguru import logger
from util.Cv2Utils import write_or_video, write_ai_video, push_video_stream, close_all_p, video_conjuncing
from util.ImageUtils import url2Array, add_water_pic
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue
class OnPushStreamThread2(Thread):
    """Push-stream thread for ONLINE analysis, v2 (batched frames).

    Consumes messages from ``push_queue``:
      * ``(1, (frame_list, frame_index_list, all_frames), draw_cfg, dets)``
        — for every frame of the batch, draw each model's detections in a
        worker pool, write original + annotated videos locally, push the
        merged frame to ``push_url`` and forward annotated frames to the
        image-upload queue;
      * ``(2, 'stop')`` — close every writer/stream and exit.

    Any exception is stored on ``self.ex`` for the owner to inspect.
    """
    __slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')
    def __init__(self, *args):
        super().__init__()
        # Injected collaborators.
        self._msg, self._push_queue, self._image_queue, self._context = args
        # Set when run() dies with an exception.
        self.ex = None
        self._logo = None
        if self._context["video"]["video_add_water"]:
            self._logo = self._msg.get("logo_url")
            if self._logo:
                self._logo = url2Array(self._logo, enable_ex=False)
            # BUG FIX: after url2Array the logo is a numpy array and
            # `not array` raises "truth value of an array is ambiguous".
            # Compare against None instead (same as PushStreamProcess.build_logo_url).
            if self._logo is None:
                self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
    def run(self):
        request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
        orFilePath, aiFilePath, logo = self._context.get("orFilePath"), self._context.get("aiFilePath"), self._logo
        or_video_file, ai_video_file, push_p = None, None, None
        push_url = self._msg.get("push_url")
        try:
            logger.info("开始启动推流线程requestId:{}", request_id)
            # t: video writers; tt: parallel box drawing.
            with ThreadPoolExecutor(max_workers=2) as t:
                with ThreadPoolExecutor(max_workers=5) as tt:
                    p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
                    while True:
                        push_r = push_queue.get()
                        if push_r is not None:
                            # Message shapes:
                            #   (1, batch, draw_config, per-model results) - frames to render
                            #   (2, command)                               - control message
                            if push_r[0] == 1:  # handle a batch of video frames
                                frame_list, frame_index_list, all_frames = push_r[1]
                                allowedList, rainbows, label_arrays, font_config = push_r[2]
                                for i, frame in enumerate(frame_list):
                                    copy_frame = frame.copy()
                                    det_xywh = {}
                                    # A frame may carry results from several models.
                                    thread_p = []
                                    for det in push_r[3]:
                                        code, retResults = det
                                        det_xywh[code] = {}
                                        # Only draw when the model detected something on this frame.
                                        if len(retResults[i]) > 0:
                                            for qs in retResults[i]:
                                                detect_targets_code = int(qs[6])
                                                if detect_targets_code not in allowedList:
                                                    logger.warning("当前检测目标不在检测目标中: {}, requestId: {}", detect_targets_code, request_id)
                                                    continue
                                                score = qs[5]
                                                label_array = label_arrays[detect_targets_code]
                                                color = rainbows[detect_targets_code]
                                                # Accept either xyxy scalars or an already-built polygon box.
                                                if not isinstance(qs[1], (list, tuple, np.ndarray)):
                                                    xc, yc, x2, y2 = int(qs[1]), int(qs[2]), int(qs[3]), int(qs[4])
                                                    box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
                                                else:
                                                    box = qs[1]
                                                # Draw in the worker pool: draw_painting_joint(box, img, label_array, score, color, config)
                                                dp = tt.submit(draw_painting_joint, box, copy_frame, label_array, score,
                                                               color, font_config)
                                                thread_p.append(dp)
                                                cd = det_xywh[code].get(detect_targets_code)
                                                if cd is None:
                                                    det_xywh[code][detect_targets_code] = [
                                                        [detect_targets_code, box, score, label_array, color]]
                                                else:
                                                    det_xywh[code][detect_targets_code].append(
                                                        [detect_targets_code, box, score, label_array, color])
                                    # BUG FIX: `if logo:` on a numpy array raises ValueError.
                                    if logo is not None:
                                        frame = add_water_pic(frame, logo, request_id)
                                        copy_frame = add_water_pic(copy_frame, logo, request_id)
                                    # Wait for every drawing task and surface any worker exception.
                                    if len(thread_p) > 0:
                                        completed_results = wait(thread_p, timeout=60, return_when=ALL_COMPLETED)
                                        completed_futures = completed_results.done
                                        for r in completed_futures:
                                            if r.exception():
                                                raise r.exception()
                                    frame_merge = video_conjuncing(frame, copy_frame)
                                    # Write the original video locally.
                                    write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
                                                                     or_write_status, request_id)
                                    # Write the annotated video locally.
                                    write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
                                                                     ai_write_status, request_id)
                                    if len(det_xywh) > 0:
                                        put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
                                                                    font_config)))
                                    push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
                                    ai_video_file = write_ai_video_result.result()
                                    or_video_file = write_or_video_result.result()
                            if push_r[0] == 2:
                                if 'stop' == push_r[1]:
                                    logger.info("停止推流线程, requestId: {}", request_id)
                                    close_all_p(push_p, or_video_file, ai_video_file, request_id)
                                    or_video_file, ai_video_file, push_p = None, None, None
                                    break
        except Exception as e:
            logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
            self.ex = e
        finally:
            # Idempotent: handles are None after a clean stop.
            close_all_p(push_p, or_video_file, ai_video_file, request_id)
            logger.info("推流线程停止完成requestId:{}", request_id)
# class OffPushStreamThread(Thread):
# __slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')
#
# def __init__(self, *args):
# super().__init__()
# # 传参
# self._msg, self._push_queue, self._image_queue, self._context = args
# # 自带参数
# self.ex = None
# self._logo = None
# if self._context["video"]["video_add_water"]:
# self._logo = self._msg.get("logo_url")
# if self._logo:
# self._logo = url2Array(self._logo, enable_ex=False)
# if not self._logo:
# self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
#
# def run(self):
# request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
# aiFilePath, logo = self._context.get("aiFilePath"), self._logo
# ai_video_file, push_p = None, None
# push_url = self._msg.get("push_url")
# try:
# logger.info("开始启动推流线程requestId:{}", request_id)
# with ThreadPoolExecutor(max_workers=1) as t:
# p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
# while True:
# push_parm = push_queue.get()
# if push_parm is not None:
# # [(1, 原视频帧, 分析视频帧)]
# # # [视频帧、当前帧数、 总帧数、 [(问题数组、code、allowedList、label_arraylist、rainbows)]]
# # res = (1, (pull_frame[1], pull_frame[2], pull_frame[3], []))
# # [(2, 操作指令)]
# if push_parm[0] == 1: # 视频帧操作
# frame, current_frame, all_frames, ques_list = push_parm[1]
# copy_frame = frame.copy()
# det_xywh = {}
# if len(ques_list) > 0:
# for qs in ques_list:
# det_xywh[qs[1]] = {}
# detect_targets_code = int(qs[0][0])
# score = qs[0][-1]
# label_array = qs[3][detect_targets_code]
# color = qs[4][detect_targets_code]
# if not isinstance(qs[0][1], (list, tuple, np.ndarray)):
# xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4])
# box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
# else:
# box = qs[0][1]
# draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop")
# cd = det_xywh[qs[1]].get(detect_targets_code)
# if cd is None:
# det_xywh[qs[1]][detect_targets_code] = [
# [detect_targets_code, box, score, label_array, color]]
# else:
# det_xywh[qs[1]][detect_targets_code].append(
# [detect_targets_code, box, score, label_array, color])
# if logo:
# frame = add_water_pic(frame, logo, request_id)
# copy_frame = add_water_pic(copy_frame, logo, request_id)
# frame_merge = video_conjuncing(frame, copy_frame)
# # 写识别视频到本地
# write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
# ai_write_status, request_id)
# if len(det_xywh) > 0:
# put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames)))
# push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
# ai_video_file = write_ai_video_result.result()
# if push_parm[0] == 2:
# if 'stop' == push_parm[1]:
# logger.info("停止推流线程, requestId: {}", request_id)
# close_all_p(push_p, None, ai_video_file, request_id)
# ai_video_file, push_p = None, None
# break
# except Exception as e:
# logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
# self.ex = e
# finally:
# close_all_p(push_p, None, ai_video_file, request_id)
# logger.info("推流线程停止完成requestId:{}", request_id)

View File

@ -0,0 +1,468 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from os import getpid
import os
from os.path import join
from time import time, sleep
from traceback import format_exc
import cv2
import numpy as np
import psutil
from loguru import logger
from enums.ExceptionEnum import ExceptionType
from enums.ModelTypeEnum import ModelType
from exception.CustomerException import ServiceException
from util import ImageUtils
from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push_video_stream, close_all_p
from util.ImageUtils import url2Array, add_water_pic
from util.LogUtils import init_log
from util.PlotsUtils import draw_painting_joint, filterBox, xywh2xyxy2, draw_name_joint
from util.QueUtil import get_no_block_queue, put_queue, clear_queue
class PushStreamProcess(Process):
    """Base class for the push-stream worker processes.

    Holds the task message, the four inter-process queues and the shared
    context, and provides the watermark-logo loader plus a static helper
    that draws one model's detections onto a frame copy.
    """
    __slots__ = ("_msg", "_push_queue", "_image_queue", '_push_ex_queue', '_hb_queue', "_context")

    def __init__(self, *args):
        super().__init__()
        # positional wiring: (msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
        (self._msg, self._push_queue, self._image_queue,
         self._push_ex_queue, self._hb_queue, self._context) = args

    def build_logo_url(self):
        """Resolve the watermark image and cache it in ``context["logo"]``.

        When watermarking is enabled, try the task's ``logo_url`` first and
        fall back to the bundled ``image/logo.png``; otherwise store ``None``.
        """
        candidate = None
        if self._context["video"]["video_add_water"]:
            candidate = self._msg.get("logo_url")
            if candidate:
                # download failures return None instead of raising (enable_ex=False)
                candidate = url2Array(candidate, enable_ex=False)
            if candidate is None:
                candidate = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
        self._context["logo"] = candidate

    @staticmethod
    def handle_image(det_xywh, det, frame_score, copy_frame, draw_config, code_list):
        """Draw one model's accepted detections and record them.

        ``det`` is ``(model_code, detections)``; boxes below ``frame_score``
        or outside the model's allowed class list are skipped.  Accepted
        boxes are painted onto ``copy_frame`` and appended to ``det_xywh``
        while ``code_list`` keeps a per-class hit counter.
        """
        code, det_result = det
        # len() rather than truthiness: det_result may be a numpy array
        if len(det_result) == 0:
            return
        font_config = draw_config["font_config"]
        model_cfg = draw_config[code]
        allowedList, rainbows, label_arrays = model_cfg["allowedList"], model_cfg["rainbows"], model_cfg["label_arrays"]
        for raw in det_result:
            box, score, cls = xywh2xyxy2(raw)
            if cls not in allowedList or score < frame_score:
                continue
            label_array, color = label_arrays[cls], rainbows[cls]
            draw_painting_joint(box, copy_frame, label_array, score, color, font_config)
            if code not in det_xywh:
                det_xywh[code], code_list[code] = {}, {}
            entry = [cls, box, score, label_array, color]
            if det_xywh[code].get(cls) is None:
                code_list[code][cls] = 1
                det_xywh[code][cls] = [entry]
            else:
                code_list[code][cls] += 1
                det_xywh[code][cls].append(entry)
class OnPushStreamProcess(PushStreamProcess):
    """Online (live-stream) push worker.

    Consumes annotated frame batches from the push queue, draws accepted
    detections on a copy of each frame, writes the raw and the annotated
    video locally, pushes the merged (raw | annotated) frame to the task's
    ``push_url`` and forwards de-duplicated problem frames to the image
    queue for upload.
    """
    __slots__ = ()

    def run(self):
        self.build_logo_url()
        msg, context = self._msg, self._context
        base_dir, env, orFilePath, aiFilePath, logo, service_timeout, frame_score = context["base_dir"], \
            context['env'], context["orFilePath"], context["aiFilePath"], context["logo"], \
            int(context["service"]["timeout"]) + 120, context["service"]["filter"]["frame_score"]
        request_id, push_url = msg["request_id"], msg["push_url"]
        push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, \
            self._push_ex_queue, self._hb_queue
        or_video_file, ai_video_file, push_p, ex = None, None, None, None
        ex_status = True
        # switch: de-duplicate repeated detections across nearby frames
        picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
        # history matrix of recently reported boxes; column 11 is an age counter
        qs_np_tmp = None
        # pixel distance threshold for filterBox (recomputed per frame from its height)
        pix_dis = 60
        try:
            init_log(base_dir, env)
            logger.info("开始实时启动推流进程requestId:{},pid:{}, ppid:{}", request_id, os.getpid(), os.getppid())
            with ThreadPoolExecutor(max_workers=2) as t:
                # retry bookkeeping for push / write-original / write-ai: [timestamp, retries]
                p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
                start_time = time()
                while True:
                    # 1) guard against the task running past its deadline,
                    # 2) guard against the main process having died underneath us
                    if time() - start_time > service_timeout:
                        logger.error("推流超时, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
                                               ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
                    # if the parent died we were re-parented to init (ppid == 1): stop ourselves
                    if psutil.Process(getpid()).ppid() == 1:
                        logger.info("推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
                        ex_status = False
                        for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
                            clear_queue(q)
                        break
                    # next item: (1, frame batch) or (2, control command)
                    push_r = get_no_block_queue(push_queue)
                    if push_r is not None:
                        if push_r[0] == 1:
                            frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
                            for i, frame in enumerate(frame_list):
                                pix_dis = int((frame.shape[0] // 10) * 1.2)
                                # draw on a copy so the raw frame stays clean
                                copy_frame = frame.copy()
                                det_xywh, thread_p = {}, []
                                det_xywh2 = {}
                                # matrix of every accepted box of this frame (8 corner coords, score, cls, code)
                                qs_np = None
                                qs_return = []
                                for det in push_objs[i]:
                                    code, det_result = det
                                    # per-model handling; draw config is keyed by model code
                                    if len(det_result) > 0:
                                        font_config, allowedList = draw_config["font_config"], \
                                            draw_config[code]["allowedList"]
                                        rainbows, label_arrays = draw_config[code]["rainbows"], \
                                            draw_config[code]["label_arrays"]
                                        for qs in det_result:
                                            try:
                                                # guard against NaN in raw detection output
                                                box, score, cls = xywh2xyxy2(qs)
                                            except Exception:
                                                continue
                                            if cls not in allowedList or score < frame_score:
                                                continue
                                            label_array, color = label_arrays[cls], rainbows[cls]
                                            if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2:
                                                rr = t.submit(draw_name_joint, box, copy_frame,
                                                              draw_config[code]["label_dict"], score, color,
                                                              font_config, qs[6])
                                            else:
                                                rr = t.submit(draw_painting_joint, box, copy_frame, label_array,
                                                              score, color, font_config)
                                            thread_p.append(rr)
                                            if det_xywh.get(code) is None:
                                                det_xywh[code] = {}
                                            cd = det_xywh[code].get(cls)
                                            # channel2/cls==2 is drawn but never reported directly
                                            if not (ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2):
                                                if cd is None:
                                                    det_xywh[code][cls] = [[cls, box, score, label_array, color]]
                                                else:
                                                    det_xywh[code][cls].append([cls, box, score, label_array, color])
                                            row = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
                                                            box[2][0], box[2][1], box[3][0], box[3][1],
                                                            score, cls, code], dtype=np.float32)
                                            # np.vstack: np.row_stack was removed in NumPy 2.0
                                            qs_np = row if qs_np is None else np.vstack((qs_np, row))
                                # BUGFIX: logo is an ndarray (or None); `if logo:` raises
                                # ValueError on a multi-pixel image, so test identity instead
                                if logo is not None:
                                    frame = add_water_pic(frame, logo, request_id)
                                    copy_frame = add_water_pic(copy_frame, logo, request_id)
                                if len(thread_p) > 0:
                                    for r in thread_p:
                                        r.result()
                                frame_merge = video_conjuncing(frame, copy_frame)
                                # write raw video locally
                                write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
                                                                 or_write_status, request_id)
                                # write annotated video locally
                                write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
                                                                 ai_video_file, ai_write_status, request_id)
                                push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
                                                              p_push_status, request_id)
                                # problem path: de-duplicate against recently reported boxes
                                if qs_np is not None:
                                    if len(qs_np.shape) == 1:
                                        qs_np = qs_np[np.newaxis, ...]
                                    qs_np_id = qs_np.copy()
                                    b = np.ones(qs_np_id.shape[0])
                                    qs_np_id = np.column_stack((qs_np_id, b))
                                    if qs_np_tmp is None:
                                        if picture_similarity:
                                            qs_np_tmp = qs_np_id.copy()
                                        b = np.zeros(qs_np.shape[0])
                                        qs_return = np.column_stack((qs_np, b))
                                    else:
                                        qs_return = filterBox(qs_np, qs_np_tmp, pix_dis)
                                        if picture_similarity:
                                            qs_np_tmp = np.append(qs_np_tmp, qs_np_id, axis=0)
                                        # age the history and drop rows seen for >= 75 frames
                                        qs_np_tmp[:, 11] += 1
                                        qs_np_tmp = np.delete(qs_np_tmp, np.where((qs_np_tmp[:, 11] >= 75))[0], axis=0)
                                    # column 11 == 1 marks a box not matched in the history (a new problem)
                                    has = False
                                    new_lab = []
                                    for j in qs_return:
                                        if j[11] == 1:
                                            has = True
                                            new_lab.append(j[9])
                                    if has:
                                        for q in qs_return:
                                            if q[11] >= 1:
                                                cls = int(q[9])
                                                if not (cls in new_lab):
                                                    continue  # keep other classes from piggy-backing
                                                code = str(int(q[10])).zfill(3)
                                                if det_xywh2.get(code) is None:
                                                    det_xywh2[code] = {}
                                                cd = det_xywh2[code].get(cls)
                                                score = q[8]
                                                rainbows, label_arrays = draw_config[code]["rainbows"], \
                                                    draw_config[code]["label_arrays"]
                                                label_array, color = label_arrays[cls], rainbows[cls]
                                                box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])),
                                                       (int(q[4]), int(q[5])), (int(q[6]), int(q[7]))]
                                                is_new = False
                                                if q[11] == 1:
                                                    is_new = True
                                                if cd is None:
                                                    det_xywh2[code][cls] = [[cls, box, score, label_array, color, is_new]]
                                                else:
                                                    det_xywh2[code][cls].append([cls, box, score, label_array, color, is_new])
                                if len(det_xywh2) > 0:
                                    put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames,
                                                                draw_config["font_config"]]))
                                push_p = push_stream_result.result(timeout=60)
                                ai_video_file = write_ai_video_result.result(timeout=60)
                                or_video_file = write_or_video_result.result(timeout=60)
                        # control command
                        if push_r[0] == 2:
                            if 'stop' == push_r[1]:
                                logger.info("停止推流进程, requestId: {}", request_id)
                                break
                            if 'stop_ex' == push_r[1]:
                                ex_status = False
                                logger.info("停止推流进程, requestId: {}", request_id)
                                break
                        del push_r
                    else:
                        sleep(1)
        except ServiceException as s:
            logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            # release push pipe and both video writers
            close_all_p(push_p, or_video_file, ai_video_file, request_id)
            if ex:
                code, msg = ex
                put_queue(push_ex_queue, (1, code, msg), timeout=2)
            else:
                if ex_status:
                    # give the image queue up to a minute to drain before discarding
                    c_time = time()
                    while time() - c_time < 60:
                        if image_queue.qsize() == 0 or image_queue.empty():
                            break
                        sleep(2)
            for q in [push_queue, image_queue, hb_queue]:
                clear_queue(q)
            logger.info("推流进程停止完成!图片队列大小: {}, requestId:{}", image_queue.qsize(), request_id)
class OffPushStreamProcess(PushStreamProcess):
    """Offline (recorded-video) push worker.

    Same pipeline as :class:`OnPushStreamProcess` but without the raw-video
    writer, and it additionally reports task progress to the heartbeat queue
    every 300 frames.
    """
    __slots__ = ()

    def run(self):
        self.build_logo_url()
        msg, context = self._msg, self._context
        request_id = msg["request_id"]
        base_dir, env = context["base_dir"], context['env']
        push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, \
            self._push_ex_queue, self._hb_queue
        aiFilePath, logo = context["aiFilePath"], context["logo"]
        ai_video_file, push_p, push_url = None, None, msg["push_url"]
        service_timeout = int(context["service"]["timeout"]) + 120
        frame_score = context["service"]["filter"]["frame_score"]
        ex = None
        ex_status = True
        # switch: de-duplicate repeated detections across nearby frames
        picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
        # history matrix of recently reported boxes; column 11 is an age counter
        qs_np_tmp = None
        # pixel distance threshold for filterBox (recomputed per frame from its height)
        pix_dis = 60
        try:
            init_log(base_dir, env)
            logger.info("开始启动离线推流进程requestId:{}", request_id)
            with ThreadPoolExecutor(max_workers=2) as t:
                # push / write-ai retry state: [timestamp, retries]
                p_push_status, ai_write_status = [0, 0], [0, 0]
                start_time = time()
                while True:
                    # task deadline guard
                    if time() - start_time > service_timeout:
                        logger.error("离线推流超时, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
                                               ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
                    # orphaned-child guard: parent death re-parents us to init (ppid == 1)
                    if psutil.Process(getpid()).ppid() == 1:
                        logger.info("离线推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
                        ex_status = False
                        for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
                            clear_queue(q)
                        break
                    # next item: (1, frame batch) or (2, control command)
                    push_r = get_no_block_queue(push_queue)
                    if push_r is not None:
                        if push_r[0] == 1:
                            frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
                            for i, frame in enumerate(frame_list):
                                pix_dis = int((frame.shape[0] // 10) * 1.2)
                                # progress heartbeat every 300 frames
                                if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames:
                                    task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames))
                                    put_queue(hb_queue, {"hb_value": task_process}, timeout=2)
                                # draw on a copy so the raw frame stays clean
                                copy_frame = frame.copy()
                                det_xywh, thread_p = {}, []
                                det_xywh2 = {}
                                # matrix of every accepted box of this frame
                                qs_np = None
                                qs_return = []
                                for det in push_objs[i]:
                                    code, det_result = det
                                    # per-model handling; draw config is keyed by model code
                                    if len(det_result) > 0:
                                        font_config, allowedList = draw_config["font_config"], \
                                            draw_config[code]["allowedList"]
                                        rainbows, label_arrays = draw_config[code]["rainbows"], \
                                            draw_config[code]["label_arrays"]
                                        for qs in det_result:
                                            box, score, cls = xywh2xyxy2(qs)
                                            if cls not in allowedList or score < frame_score:
                                                continue
                                            label_array, color = label_arrays[cls], rainbows[cls]
                                            if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2:
                                                rr = t.submit(draw_name_joint, box, copy_frame,
                                                              draw_config[code]["label_dict"], score, color,
                                                              font_config, qs[6])
                                            else:
                                                rr = t.submit(draw_painting_joint, box, copy_frame, label_array,
                                                              score, color, font_config)
                                            thread_p.append(rr)
                                            if det_xywh.get(code) is None:
                                                det_xywh[code] = {}
                                            cd = det_xywh[code].get(cls)
                                            # channel2/cls==2 is drawn but never reported directly
                                            if not (ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2):
                                                if cd is None:
                                                    det_xywh[code][cls] = [[cls, box, score, label_array, color]]
                                                else:
                                                    det_xywh[code][cls].append([cls, box, score, label_array, color])
                                            row = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
                                                            box[2][0], box[2][1], box[3][0], box[3][1],
                                                            score, cls, code], dtype=np.float32)
                                            # np.vstack: np.row_stack was removed in NumPy 2.0
                                            qs_np = row if qs_np is None else np.vstack((qs_np, row))
                                # BUGFIX: logo is an ndarray (or None); `if logo:` raises
                                # ValueError on a multi-pixel image, so test identity instead
                                if logo is not None:
                                    frame = add_water_pic(frame, logo, request_id)
                                    copy_frame = add_water_pic(copy_frame, logo, request_id)
                                if len(thread_p) > 0:
                                    for r in thread_p:
                                        r.result()
                                frame_merge = video_conjuncing(frame, copy_frame)
                                # write annotated video locally
                                write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
                                                                 ai_video_file,
                                                                 ai_write_status, request_id)
                                push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
                                                              p_push_status, request_id)
                                # problem path: de-duplicate against recently reported boxes
                                if qs_np is not None:
                                    if len(qs_np.shape) == 1:
                                        qs_np = qs_np[np.newaxis, ...]
                                    qs_np_id = qs_np.copy()
                                    b = np.ones(qs_np_id.shape[0])
                                    qs_np_id = np.column_stack((qs_np_id, b))
                                    if qs_np_tmp is None:
                                        if picture_similarity:
                                            qs_np_tmp = qs_np_id.copy()
                                        b = np.zeros(qs_np.shape[0])
                                        qs_return = np.column_stack((qs_np, b))
                                    else:
                                        qs_return = filterBox(qs_np, qs_np_tmp, pix_dis)
                                        if picture_similarity:
                                            qs_np_tmp = np.append(qs_np_tmp, qs_np_id, axis=0)
                                        # age the history and drop rows seen for >= 75 frames
                                        qs_np_tmp[:, 11] += 1
                                        qs_np_tmp = np.delete(qs_np_tmp, np.where((qs_np_tmp[:, 11] >= 75))[0], axis=0)
                                    # column 11 == 1 marks a box not matched in the history (a new problem)
                                    has = False
                                    new_lab = []
                                    for j in qs_return:
                                        if j[11] == 1:
                                            has = True
                                            new_lab.append(j[9])
                                    if has:
                                        for q in qs_return:
                                            if q[11] >= 1:
                                                cls = int(q[9])
                                                if not (cls in new_lab):
                                                    continue  # keep other classes from piggy-backing
                                                code = str(int(q[10])).zfill(3)
                                                if det_xywh2.get(code) is None:
                                                    det_xywh2[code] = {}
                                                cd = det_xywh2[code].get(cls)
                                                score = q[8]
                                                rainbows, label_arrays = draw_config[code]["rainbows"], \
                                                    draw_config[code]["label_arrays"]
                                                label_array, color = label_arrays[cls], rainbows[cls]
                                                box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])),
                                                       (int(q[4]), int(q[5])), (int(q[6]), int(q[7]))]
                                                is_new = False
                                                if q[11] == 1:
                                                    is_new = True
                                                if cd is None:
                                                    det_xywh2[code][cls] = [[cls, box, score, label_array, color, is_new]]
                                                else:
                                                    det_xywh2[code][cls].append([cls, box, score, label_array, color, is_new])
                                if len(det_xywh2) > 0:
                                    put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames,
                                                                draw_config["font_config"]]))
                                push_p = push_stream_result.result(timeout=60)
                                ai_video_file = write_ai_video_result.result(timeout=60)
                        # control command
                        if push_r[0] == 2:
                            if 'stop' == push_r[1]:
                                logger.info("停止推流进程, requestId: {}", request_id)
                                break
                            if 'stop_ex' == push_r[1]:
                                logger.info("停止推流进程, requestId: {}", request_id)
                                ex_status = False
                                break
                        del push_r
                    else:
                        sleep(1)
        except ServiceException as s:
            logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            # release push pipe and the annotated-video writer (no raw video offline)
            close_all_p(push_p, None, ai_video_file, request_id)
            if ex:
                code, msg = ex
                put_queue(push_ex_queue, (1, code, msg), timeout=2)
            else:
                if ex_status:
                    # give the image queue up to a minute to drain before discarding
                    c_time = time()
                    while time() - c_time < 60:
                        if image_queue.qsize() == 0 or image_queue.empty():
                            break
                        sleep(2)
            for q in [push_queue, image_queue, hb_queue]:
                clear_queue(q)
            logger.info("推流进程停止完成requestId:{}", request_id)

View File

@ -0,0 +1,356 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from os import getpid
from os.path import join
from time import time, sleep
from traceback import format_exc
import cv2
import numpy as np
import psutil
from loguru import logger
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util import ImageUtils
from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push_video_stream, close_all_p
from util.ImageUtils import url2Array, add_water_pic
from util.LogUtils import init_log
from util.PlotsUtils import draw_painting_joint, filterBox, xywh2xyxy2
from util.QueUtil import get_no_block_queue, put_queue, clear_queue
class PushStreamProcess2(Process):
    """Base class for the tracker-based push-stream worker processes.

    Carries the task message, the four inter-process queues and the shared
    context, and resolves the optional watermark logo.
    """
    __slots__ = ("_msg", "_push_queue", "_image_queue", '_push_ex_queue', '_hb_queue', "_context")

    def __init__(self, *args):
        super().__init__()
        # positional wiring: (msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
        (self._msg, self._push_queue, self._image_queue,
         self._push_ex_queue, self._hb_queue, self._context) = args

    def build_logo_url(self):
        """Resolve the watermark image and cache it in ``context["logo"]``.

        When watermarking is enabled, try the task's ``logo_url`` first and
        fall back to the bundled ``image/logo.png``; otherwise store ``None``.
        """
        candidate = None
        if self._context["video"]["video_add_water"]:
            candidate = self._msg.get("logo_url")
            if candidate:
                # download failures return None instead of raising (enable_ex=False)
                candidate = url2Array(candidate, enable_ex=False)
            if candidate is None:
                candidate = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
        self._context["logo"] = candidate
class OnPushStreamProcess2(PushStreamProcess2):
    """Online push worker for tracker-annotated detections.

    Unlike :class:`OnPushStreamProcess`, novelty is decided by comparing each
    detection's track id against the highest id seen in the previous batch
    (``minID``/``maxID``) instead of a positional history matrix.
    """
    __slots__ = ()

    def run(self):
        msg, context = self._msg, self._context
        self.build_logo_url()
        request_id = msg["request_id"]
        base_dir, env = context["base_dir"], context['env']
        push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, \
            self._push_ex_queue, self._hb_queue
        orFilePath, aiFilePath, logo = context["orFilePath"], context["aiFilePath"], context["logo"]
        or_video_file, ai_video_file, push_p, push_url = None, None, None, msg["push_url"]
        service_timeout = int(context["service"]["timeout"]) + 120
        frame_score = context["service"]["filter"]["frame_score"]
        ex = None
        ex_status = True
        try:
            init_log(base_dir, env)
            logger.info("开始启动推流进程requestId:{}", request_id)
            with ThreadPoolExecutor(max_workers=3) as t:
                # push / write-original / write-ai retry state: [timestamp, retries]
                p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
                start_time = time()
                # track-id watermarks: ids above minID are "new" in this batch
                minID = 0
                maxID = 0
                while True:
                    # task deadline guard
                    if time() - start_time > service_timeout:
                        logger.error("推流超时, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1])
                    # orphaned-child guard: parent death re-parents us to init (ppid == 1)
                    if psutil.Process(getpid()).ppid() == 1:
                        ex_status = False
                        logger.info("推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
                        for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
                            clear_queue(q)
                        break
                    # next item: (1, frame batch) or (2, control command)
                    push_r = get_no_block_queue(push_queue)
                    if push_r is not None:
                        if push_r[0] == 1:
                            # multi-model: push_objs is [(code, per-frame results), ...]
                            frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
                            for i, frame in enumerate(frame_list):
                                # draw on a copy so the raw frame stays clean
                                copy_frame = frame.copy()
                                det_xywh, thread_p = {}, []
                                # (code -> [cls]) pairs that contained at least one new track
                                det_tmp = {}
                                det_xywh2 = {}
                                for s_det_list in push_objs:
                                    code, det_result = s_det_list[0], s_det_list[1][i]
                                    if len(det_result) > 0:
                                        font_config, allowedList = draw_config["font_config"], \
                                            draw_config[code]["allowedList"]
                                        rainbows, label_arrays = draw_config[code]["rainbows"], \
                                            draw_config[code]["label_arrays"]
                                        for qs in det_result:
                                            box, score, cls = xywh2xyxy2(qs)
                                            if cls not in allowedList or score < frame_score:
                                                continue
                                            label_array, color = label_arrays[cls], rainbows[cls]
                                            rr = t.submit(draw_painting_joint, box, copy_frame, label_array,
                                                          score, color, font_config)
                                            thread_p.append(rr)
                                            if det_xywh.get(code) is None:
                                                det_xywh[code] = {}
                                            cd = det_xywh[code].get(cls)
                                            is_new = False
                                            # track id position depends on the tuple layout
                                            if len(qs) == 8:
                                                trackId = qs[7]
                                            elif len(qs) == 5:
                                                trackId = qs[4]
                                            else:
                                                # BUGFIX: previously unbound here (UnboundLocalError);
                                                # treat an unexpected layout as an already-seen track
                                                trackId = minID
                                            if trackId > minID:
                                                is_new = True
                                                if det_tmp.get(code) is None:
                                                    det_tmp[code] = [cls]
                                                else:
                                                    if not (cls in det_tmp[code]):
                                                        det_tmp[code].append(cls)
                                            qs_tmp = [cls, box, score, label_array, color, is_new]
                                            if trackId > maxID:
                                                maxID = trackId
                                            if cd is None:
                                                det_xywh[code][cls] = [qs_tmp]
                                            else:
                                                det_xywh[code][cls].append(qs_tmp)
                                # next frame's novelty threshold is this frame's highest id
                                minID = maxID
                                # BUGFIX: logo is an ndarray (or None); `if logo:` raises
                                # ValueError on a multi-pixel image, so test identity instead
                                if logo is not None:
                                    frame = add_water_pic(frame, logo, request_id)
                                    copy_frame = add_water_pic(copy_frame, logo, request_id)
                                if len(thread_p) > 0:
                                    for r in thread_p:
                                        r.result()
                                frame_merge = video_conjuncing(frame, copy_frame)
                                # write raw video locally
                                write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
                                                                 or_write_status, request_id)
                                # write annotated video locally
                                write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
                                                                 ai_video_file, ai_write_status, request_id)
                                push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
                                                         p_push_status,
                                                         request_id)
                                if det_xywh:
                                    # report every (code, cls) bucket that saw a new track
                                    for key, value in det_xywh.items():
                                        for k in value.keys():
                                            if (key in det_tmp) and (k in det_tmp[key]):
                                                # BUGFIX: `det_xywh2[key] = {}` here wiped classes
                                                # collected earlier for the same code; setdefault keeps them
                                                det_xywh2.setdefault(key, {})[k] = det_xywh[key][k]
                                if len(det_xywh2) > 0:
                                    put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames,
                                                                draw_config["font_config"]]))
                                push_p = push_p_result.result(timeout=60)
                                ai_video_file = write_ai_video_result.result(timeout=60)
                                or_video_file = write_or_video_result.result(timeout=60)
                        # control command
                        if push_r[0] == 2:
                            if 'stop' == push_r[1]:
                                logger.info("停止推流线程, requestId: {}", request_id)
                                break
                            if 'stop_ex' == push_r[1]:
                                logger.info("停止推流线程, requestId: {}", request_id)
                                ex_status = False
                                break
                        del push_r
                    else:
                        sleep(1)
        except ServiceException as s:
            logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            # release push pipe and both video writers
            close_all_p(push_p, or_video_file, ai_video_file, request_id)
            if ex:
                code, msg = ex
                put_queue(push_ex_queue, (1, code, msg), timeout=2)
            else:
                if ex_status:
                    # give the image queue up to a minute to drain before discarding
                    c_time = time()
                    while time() - c_time < 60:
                        if image_queue.qsize() == 0 or image_queue.empty():
                            break
                        sleep(2)
            for q in [push_queue, image_queue, hb_queue]:
                clear_queue(q)
            logger.info("推流进程停止完成requestId:{}", request_id)
class OffPushStreamProcess2(PushStreamProcess2):
    """Offline push worker for tracker-annotated detections.

    Same pipeline as :class:`OnPushStreamProcess2` but without the raw-video
    writer, and it reports task progress to the heartbeat queue every 300
    frames.
    """
    __slots__ = ()

    def run(self):
        self.build_logo_url()
        msg, context = self._msg, self._context
        request_id = msg["request_id"]
        base_dir, env = context["base_dir"], context['env']
        push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, \
            self._push_ex_queue, self._hb_queue
        aiFilePath, logo = context["aiFilePath"], context["logo"]
        ai_video_file, push_p, push_url = None, None, msg["push_url"]
        service_timeout = int(context["service"]["timeout"]) + 120
        frame_score = context["service"]["filter"]["frame_score"]
        ex = None
        ex_status = True
        try:
            init_log(base_dir, env)
            logger.info("开始启动离线推流进程requestId:{}", request_id)
            with ThreadPoolExecutor(max_workers=2) as t:
                # push / write-ai retry state: [timestamp, retries]
                p_push_status, ai_write_status = [0, 0], [0, 0]
                start_time = time()
                # track-id watermarks: ids above minID are "new" in this batch
                minID = 0
                maxID = 0
                while True:
                    # task deadline guard
                    if time() - start_time > service_timeout:
                        logger.error("离线推流超时, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1])
                    # orphaned-child guard: parent death re-parents us to init (ppid == 1)
                    if psutil.Process(getpid()).ppid() == 1:
                        ex_status = False
                        logger.info("离线推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
                        for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
                            clear_queue(q)
                        break
                    # next item: (1, frame batch) or (2, control command)
                    push_r = get_no_block_queue(push_queue)
                    if push_r is not None:
                        if push_r[0] == 1:
                            frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
                            for i, frame in enumerate(frame_list):
                                # progress heartbeat every 300 frames
                                if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames:
                                    task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames))
                                    put_queue(hb_queue, {"hb_value": task_process}, timeout=2)
                                # draw on a copy so the raw frame stays clean
                                copy_frame = frame.copy()
                                det_xywh, thread_p = {}, []
                                det_xywh2 = {}
                                # (code -> [cls]) pairs that contained at least one new track
                                det_tmp = {}
                                for s_det_list in push_objs:
                                    code, det_result = s_det_list[0], s_det_list[1][i]
                                    if len(det_result) > 0:
                                        font_config, allowedList = draw_config["font_config"], \
                                            draw_config[code]["allowedList"]
                                        rainbows, label_arrays = draw_config[code]["rainbows"], \
                                            draw_config[code]["label_arrays"]
                                        for qs in det_result:
                                            box, score, cls = xywh2xyxy2(qs)
                                            if cls not in allowedList or score < frame_score:
                                                continue
                                            label_array, color = label_arrays[cls], rainbows[cls]
                                            rr = t.submit(draw_painting_joint, box, copy_frame, label_array,
                                                          score, color, font_config)
                                            thread_p.append(rr)
                                            if det_xywh.get(code) is None:
                                                det_xywh[code] = {}
                                            cd = det_xywh[code].get(cls)
                                            is_new = False
                                            # track id position depends on the tuple layout
                                            if len(qs) == 8:
                                                trackId = qs[7]
                                            elif len(qs) == 5:
                                                trackId = qs[4]
                                            else:
                                                # BUGFIX: previously unbound here (UnboundLocalError);
                                                # treat an unexpected layout as an already-seen track
                                                trackId = minID
                                            if trackId > minID:
                                                is_new = True
                                                if det_tmp.get(code) is None:
                                                    det_tmp[code] = [cls]
                                                else:
                                                    if not (cls in det_tmp[code]):
                                                        det_tmp[code].append(cls)
                                            qs_tmp = [cls, box, score, label_array, color, is_new]
                                            if trackId > maxID:
                                                maxID = trackId
                                            if cd is None:
                                                det_xywh[code][cls] = [qs_tmp]
                                            else:
                                                det_xywh[code][cls].append(qs_tmp)
                                # next frame's novelty threshold is this frame's highest id
                                minID = maxID
                                # BUGFIX: logo is an ndarray (or None); `if logo:` raises
                                # ValueError on a multi-pixel image, so test identity instead
                                if logo is not None:
                                    frame = add_water_pic(frame, logo, request_id)
                                    copy_frame = add_water_pic(copy_frame, logo, request_id)
                                if len(thread_p) > 0:
                                    for r in thread_p:
                                        r.result()
                                frame_merge = video_conjuncing(frame, copy_frame)
                                # write annotated video locally
                                write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
                                                                 ai_video_file,
                                                                 ai_write_status, request_id)
                                push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
                                                         p_push_status,
                                                         request_id)
                                if det_xywh:
                                    # report every (code, cls) bucket that saw a new track
                                    for key, value in det_xywh.items():
                                        for k in value.keys():
                                            if (key in det_tmp) and (k in det_tmp[key]):
                                                # BUGFIX: `det_xywh2[key] = {}` here wiped classes
                                                # collected earlier for the same code; setdefault keeps them
                                                det_xywh2.setdefault(key, {})[k] = det_xywh[key][k]
                                if len(det_xywh2) > 0:
                                    put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames,
                                                                draw_config["font_config"]]))
                                push_p = push_p_result.result(timeout=60)
                                ai_video_file = write_ai_video_result.result(timeout=60)
                        # control command
                        if push_r[0] == 2:
                            if 'stop' == push_r[1]:
                                logger.info("停止推流线程, requestId: {}", request_id)
                                break
                            if 'stop_ex' == push_r[1]:
                                logger.info("停止推流线程, requestId: {}", request_id)
                                ex_status = False
                                break
                        del push_r
                    else:
                        sleep(1)
        except ServiceException as s:
            logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            # release push pipe and the annotated-video writer (no raw video offline)
            close_all_p(push_p, None, ai_video_file, request_id)
            if ex:
                code, msg = ex
                put_queue(push_ex_queue, (1, code, msg), timeout=2)
            else:
                if ex_status:
                    # give the image queue up to a minute to drain before discarding
                    c_time = time()
                    while time() - c_time < 60:
                        if image_queue.qsize() == 0 or image_queue.empty():
                            break
                        sleep(2)
            for q in [push_queue, image_queue, hb_queue]:
                clear_queue(q)
            logger.info("推流进程停止完成requestId:{}", request_id)

View File

@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
from threading import Thread
import time
from traceback import format_exc
from loguru import logger
from entity.FeedBack import recording_feedback
from enums.RecordingStatusEnum import RecordingStatus
from util.QueUtil import get_no_block_queue, put_queue, clear_queue
class RecordingHeartbeat(Thread):
    """Heartbeat reporter for a recording task.

    Polls the heartbeat queue every 3 seconds for progress/status updates
    and a 'stop' command, and publishes a feedback message roughly every
    30 seconds until told to stop.
    """
    __slots__ = ('_fb_queue', '_hb_queue', '_request_id')

    def __init__(self, fb_queue, hb_queue, request_id):
        super().__init__()
        self._fb_queue = fb_queue
        self._hb_queue = hb_queue
        self._request_id = request_id

    def run(self):
        request_id = self._request_id
        hb_queue, fb_queue = self._hb_queue, self._fb_queue
        logger.info("开始启动录屏心跳线程requestId:{}", request_id)
        # elapsed-seconds counter; feedback fires when it hits a multiple of 30
        tick, progress = 0, '0.0000'
        status = RecordingStatus.RECORDING_WAITING.value[0]
        try:
            while True:
                time.sleep(3)
                message = get_no_block_queue(hb_queue)
                if message:
                    latest_progress = message.get("progress")
                    latest_status = message.get("status")
                    if latest_progress is not None:
                        progress = latest_progress
                    if latest_status is not None:
                        status = latest_status
                    if 'stop' == message.get("command"):
                        logger.info("开始终止心跳线程, requestId:{}", request_id)
                        break
                if tick % 30 == 0:
                    put_queue(fb_queue, recording_feedback(request_id, status, progress=progress),
                              timeout=5, is_ex=True)
                tick += 3
        except Exception:
            logger.error("心跳线程异常:{}, requestId:{}", format_exc(), request_id)
        finally:
            clear_queue(hb_queue)
            logger.info("心跳线程停止完成requestId:{}", request_id)

0
concurrency/__init__.py Normal file
View File

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,10 @@
access_key: "LTAI5tSJ62TLMUb4SZuf285A"
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7"
oss:
endpoint: "http://oss-cn-shanghai.aliyuncs.com"
bucket: "ta-tech-image"
connect_timeout: 30
vod:
host_address: "https://vod.play.t-aaron.com/"
ecsRegionId: "cn-shanghai"
cateId: 1000468341

View File

@ -0,0 +1,10 @@
access_key: "LTAI5tSJ62TLMUb4SZuf285A"
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7"
oss:
endpoint: "http://oss-cn-shanghai.aliyuncs.com"
bucket: "ta-tech-image"
connect_timeout: 30
vod:
host_address: "https://vod.play.t-aaron.com/"
ecsRegionId: "cn-shanghai"
cateId: 1000468340

View File

@ -0,0 +1,11 @@
access_key: "LTAI5tFmaJHU7ywvBH2j12Gp"
access_secret: "mX6rS1xYTgZgFAgpOrQHyc6jLXv0AV"
oss:
endpoint: "http://oss-cn-shanghai.aliyuncs.com"
bucket: "ta-tech-image"
connect_timeout: 30
vod:
host_address: "https://vod.play.t-aaron.com/"
ecsRegionId: "cn-shanghai"
cateId: 1000468338

View File

@ -0,0 +1,11 @@
access_key: "LTAI5tFmaJHU7ywvBH2j12Gp"
access_secret: "mX6rS1xYTgZgFAgpOrQHyc6jLXv0AV"
oss:
endpoint: "http://oss-cn-shanghai.aliyuncs.com"
bucket: "ta-tech-image"
connect_timeout: 30
vod:
host_address: "https://vod.play.t-aaron.com/"
ecsRegionId: "cn-shanghai"
cateId: 1000468338

View File

@ -0,0 +1,12 @@
orc:
APP_ID: 28173504
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi"
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf"
vehicle:
APP_ID: 31096670
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7"
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa"
person:
APP_ID: 31096755
API_KEY: "CiWrt4iyxOly36n3kR7utiAG"
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v"

View File

@ -0,0 +1,12 @@
orc:
APP_ID: 28173504
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi"
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf"
vehicle:
APP_ID: 31096670
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7"
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa"
person:
APP_ID: 31096755
API_KEY: "CiWrt4iyxOly36n3kR7utiAG"
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v"

View File

@ -0,0 +1,12 @@
orc:
APP_ID: 28173504
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi"
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf"
vehicle:
APP_ID: 31096670
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7"
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa"
person:
APP_ID: 31096755
API_KEY: "CiWrt4iyxOly36n3kR7utiAG"
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v"

View File

@ -0,0 +1,25 @@
bootstrap_servers: ["192.168.11.13:9092"]
topic:
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks"
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks"
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks"
dsp-alg-results-topic: "dsp-alg-task-results"
dsp-recording-task-topic: "dsp-recording-task"
dsp-recording-result-topic: "dsp-recording-result"
dsp-push-stream-task-topic: "dsp-push-stream-task"
dsp-push-stream-result-topic: "dsp-push-stream-result"
producer:
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: "dsp_ai_server"
group_id: "dsp-ai-dev"
auto_offset_reset: "latest"
enable_auto_commit: false
max_poll_records: 1

View File

@ -0,0 +1,22 @@
bootstrap_servers: ["101.132.127.1:19094"]
topic:
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks"
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks"
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks"
dsp-alg-results-topic: "dsp-alg-task-results"
dsp-recording-task-topic: "dsp-recording-task"
dsp-recording-result-topic: "dsp-recording-result"
dsp-push-stream-task-topic: "dsp-push-stream-task"
dsp-push-stream-result-topic: "dsp-push-stream-result"
producer:
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: "dsp_ai_server"
group_id: "dsp-ai-prod"
auto_offset_reset: "latest"
enable_auto_commit: false
max_poll_records: 1

View File

@ -0,0 +1,24 @@
bootstrap_servers: ["106.14.96.218:19092"]
topic:
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks"
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks"
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks"
dsp-alg-results-topic: "dsp-alg-task-results"
dsp-recording-task-topic: "dsp-recording-task"
dsp-recording-result-topic: "dsp-recording-result"
dsp-push-stream-task-topic: "dsp-push-stream-task"
dsp-push-stream-result-topic: "dsp-push-stream-result"
producer:
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: "dsp_ai_server"
group_id: "dsp-ai-test"
auto_offset_reset: "latest"
enable_auto_commit: false
max_poll_records: 1

View File

@ -0,0 +1,10 @@
enable_file_log: true
enable_stderr: true
base_path: "../dsp/logs"
log_name: "dsp.log"
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}"
level: "INFO"
rotation: "00:00"
retention: "1 days"
encoding: "utf8"

View File

@ -0,0 +1,10 @@
enable_file_log: true
enable_stderr: false
base_path: "../dsp/logs"
log_name: "dsp.log"
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}"
level: "INFO"
rotation: "00:00"
retention: "7 days"
encoding: "utf8"

View File

@ -0,0 +1,10 @@
enable_file_log: true
enable_stderr: false
base_path: "../dsp/logs"
log_name: "dsp.log"
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}"
level: "INFO"
rotation: "00:00"
retention: "3 days"
encoding: "utf8"

View File

@ -0,0 +1,7 @@
endpoint: 'minio.t-aaron.com:9000'
domain: 'https://minio.t-aaron.com'
access_key: 'IKf3A0ZSXsR1m0oalMjV'
secret_key: 'yoC6qRo2hlyZu8Pdbt6eh9TVaTV4gD7KRudromrk'
secure: false
image_bucket: 'image'
video_bucket: 'video'

View File

@ -0,0 +1,8 @@
endpoint: 'minio-jndsj.t-aaron.com:2443'
domain: 'https://minio-jndsj.t-aaron.com:2443'
access_key: 'PJM0c2qlauoXv5TMEHm2'
secret_key: 'Wr69Dm3ZH39M3GCSeyB3eFLynLPuGCKYfphixZuI'
secure: true
image_bucket: 'image'
video_bucket: 'video'

View File

@ -0,0 +1,30 @@
video:
# 视频本地保存地址
file_path: "../dsp/video/"
# 是否添加水印
video_add_water: false
service:
filter:
# 图片得分多少分以上返回图片
frame_score: 0.4
# 图片相似度过滤
picture_similarity: true
similarity: 0.65
frame_step: 160
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
recording_pull_stream_timeout: 600
model:
# 使用哪种识别方式
# 1 普通方式
# 2 模型追踪
model_type: 1
limit: 3
task:
# 任务限制5个
limit: 5
image:
limit: 20

View File

@ -0,0 +1,29 @@
video:
# 视频本地保存地址
file_path: "../dsp/video/"
# 是否添加水印
video_add_water: false
service:
filter:
# 图片得分多少分以上返回图片
frame_score: 0.4
# 图片相似度过滤
picture_similarity: true
similarity: 0.65
frame_step: 160
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
recording_pull_stream_timeout: 600
model:
# 使用哪种识别方式
# 1 普通方式
# 2 模型追踪
model_type: 1
limit: 3
task:
# 任务限制5个
limit: 5
image:
limit: 20

View File

@ -0,0 +1,31 @@
video:
# 视频本地保存地址
file_path: "../dsp/video/"
# 是否添加水印
video_add_water: false
service:
filter:
# 图片得分多少分以上返回图片
frame_score: 0.4
# 图片相似度过滤
picture_similarity: true
similarity: 0.65
frame_step: 160
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
recording_pull_stream_timeout: 600
model:
# 使用哪种识别方式
# 1 普通方式
# 2 模型追踪
model_type: 1
limit: 3
task:
# 任务限制5个
limit: 5
image:
limit: 20
  # storage source: 0 = aliyun, 1 = minio
storage_source: 1

26
dsp_master.py Normal file
View File

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
from os.path import dirname, realpath
from sys import argv
from loguru import logger
from torch import multiprocessing
from service.Dispatcher import DispatcherService
from util.LogUtils import init_log

'''
DSP main entry point: picks the runtime environment from the command line,
initialises logging, then hands control to the dispatcher service.
'''
if __name__ == '__main__':
    # CUDA-safe process start method must be set before any worker spawns.
    multiprocessing.set_start_method('spawn')
    base_dir = dirname(realpath(__file__))
    arg = argv
    print("脚本启动参数: ", arg)
    envs = ('dev', 'test', 'prod')
    # First recognised environment name on the command line wins; default 'dev'.
    env = next((candidate for candidate in envs if candidate in arg), envs[0])
    init_log(base_dir, env)
    logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙")
    DispatcherService(base_dir, env)

53
entity/FeedBack.py Normal file
View File

@ -0,0 +1,53 @@
from json import dumps
from util.TimeUtils import now_date_to_str
def message_feedback(requestId, status, analyse_type, error_code="", error_msg="", progress="", original_url="",
                     sign_url="", modelCode="", detectTargetCode="", analyse_results="", video_url="", ai_video_url=""):
    """Build the "feedback" payload reporting one analysis result.

    Non-empty ``analyse_results`` are JSON-serialized before embedding; the
    empty default ("") is passed through unchanged.
    """
    serialized_results = analyse_results
    if len(serialized_results) > 0:
        serialized_results = dumps(serialized_results)
    result_entry = {
        "original_url": original_url,
        "sign_url": sign_url,
        "analyse_results": serialized_results,
        "model_code": modelCode,
        "detect_targets_code": detectTargetCode,
        "analyse_time": now_date_to_str()
    }
    payload = {
        "request_id": requestId,
        "status": status,
        "type": analyse_type,
        "video_url": video_url,
        "ai_video_url": ai_video_url,
        "error_code": error_code,
        "error_msg": error_msg,
        "progress": progress,
        "results": [result_entry]
    }
    return {"feedback": payload}
def recording_feedback(requestId, status, error_code="", error_msg="", progress="", video_url=""):
    """Build the "recording" payload reporting screen-recording task state."""
    return {
        "recording": {
            "request_id": requestId,
            "status": status,
            "error_code": error_code,
            "error_msg": error_msg,
            "progress": progress,
            "video_url": video_url
        }
    }
def pull_stream_feedback(requestId, status, error_code="", error_msg="", videoInfo=None):
    """Build the "pull_stream" payload reporting pull/push stream status.

    videoInfo previously defaulted to a mutable [] shared across calls (the
    classic mutable-default pitfall); it now defaults to None and is replaced
    by a fresh empty list per call. Callers passing a list are unaffected.
    """
    return {"pull_stream": {
        "request_id": requestId,
        "video_info_list": [] if videoInfo is None else videoInfo,
        "push_stream_status": status,
        "error_code": error_code,
        "error_msg": error_msg,
        "current_time": now_date_to_str()
    }}

14
entity/PullStreamDto.py Normal file
View File

@ -0,0 +1,14 @@
class PullStreamDto:
    """DTO bundling everything a pull-to-push stream worker needs:
    the task message, shared context, and the inter-process queues
    (pull / feedback / heartbeat / image), plus the analysis type.
    """
    __slots__ = ('msg', 'context', 'pullQueue', 'fbQueue', 'hbQueue', 'imageQueue', 'analyse_type')

    def __init__(self, msg, context, pullQueue, fbQueue, hbQueue, imageQueue, analyse_type):
        # Assign each constructor argument to its identically-named slot.
        values = (msg, context, pullQueue, fbQueue, hbQueue, imageQueue, analyse_type)
        for slot_name, value in zip(self.__slots__, values):
            setattr(self, slot_name, value)

0
entity/__init__.py Normal file
View File

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,21 @@
from enum import Enum, unique
# Analysis task status enum
@unique
class AnalysisStatus(Enum):
    """Lifecycle status values reported for an analysis task."""
    # waiting to be scheduled
    WAITING = "waiting"
    # analysis in progress
    RUNNING = "running"
    # finished successfully
    SUCCESS = "success"
    # timed out
    TIMEOUT = "timeout"
    # failed
    FAILED = "failed"

25
enums/AnalysisTypeEnum.py Normal file
View File

@ -0,0 +1,25 @@
from enum import Enum, unique
# Analysis type enum (values are the wire-format strings used in task messages)
@unique
class AnalysisType(Enum):
    """Task categories handled by the dispatcher."""
    # online (live) analysis
    ONLINE = "1"
    # offline analysis
    OFFLINE = "2"
    # still-image analysis
    IMAGE = "3"
    # screen recording
    RECORDING = "9999"
    # pull-and-push stream forwarding
    PULLTOPUSH = "10000"

188
enums/BaiduSdkEnum.py Normal file
View File

@ -0,0 +1,188 @@
from enum import Enum, unique
'''
OCR docs: https://ai.baidu.com/ai-doc/OCR/zkibizyhz
Vehicle docs: https://ai.baidu.com/ai-doc/VEHICLE/rk3inf9tj
value[0]: error code (int, or "SDKxxx" string for client-side SDK errors)
value[1]: English description
value[2]: Chinese description (user-facing)
value[3]: 0 - report as a generic internal error
          1 - message may be shown to the user as-is
          2 - report an empty error message
value[4]: number of retries allowed for this error
'''
# Baidu SDK error enum
@unique
class BaiduSdkErrorEnum(Enum):
    UNKNOWN_ERROR = (1, "Unknown error", "未知错误", 0, 0)
    SERVICE_TEMPORARILY_UNAVAILABLE = (2, "Service temporarily unavailable", "服务暂不可用,请再次请求", 0, 3)
    UNSUPPORTED_OPENAPI_METHOD = (3, "Unsupported openapi method", "调用的API不存在", 0, 0)
    API_REQUEST_LIMIT_REACHED = (4, "Open api request limit reached", "请求量限制, 请稍后再试!", 1, 5)
    NO_PERMISSION_TO_ACCESS_DATA = (6, "No permission to access data", "无权限访问该用户数据", 1, 0)
    GET_SERVICE_TOKEN_FAILED = (13, "Get service token failed", "获取token失败", 0, 2)
    IAM_CERTIFICATION_FAILED = (14, "IAM Certification failed", "IAM 鉴权失败", 0, 1)
    APP_NOT_EXSITS_OR_CREATE_FAILED = (15, "app not exsits or create failed", "应用不存在或者创建失败", 0, 0)
    API_DAILY_REQUEST_LIMIT_REACHED = (17, "Open api daily request limit reached", "每天请求量超限额!", 1, 2)
    API_QPS_REQUEST_LIMIT_REACHED = (18, "Open api qps request limit reached", "QPS超限额!", 1, 10)
    API_TOTAL_REQUEST_LIMIT_REACHED = (19, "Open api total request limit reached", "请求总量超限额!", 1, 2)
    INVALID_TOKEN = (100, "Invalid parameter", "无效的access_token参数token拉取失败", 0, 1)
    ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID = (110, "Access token invalid or no longer valid", "access_token无效token有效期为30天", 0, 1)
    ACCESS_TOKEN_EXPIRED = (111, "Access token expired", "access token过期token有效期为30天", 0, 1)
    INTERNAL_ERROR = (282000, "internal error", "服务器内部错误", 0, 1)
    INVALID_PARAM = (216100, "invalid param", "请求中包含非法参数!", 0, 1)
    NOT_ENOUGH_PARAM = (216101, "not enough param", "缺少必须的参数!", 0, 0)
    SERVICE_NOT_SUPPORT = (216102, "service not support", "请求了不支持的服务请检查调用的url", 0, 0)
    PARAM_TOO_LONG = (216103, "param too long", "请求中某些参数过长!", 1, 0)
    APPID_NOT_EXIST = (216110, "appid not exist", "appid不存在", 0, 0)
    EMPTY_IMAGE = (216200, "empty image", "图片为空!", 1, 0)
    IMAGE_FORMAT_ERROR = (216201, "image format error", "上传的图片格式错误现阶段我们支持的图片格式为PNG、JPG、JPEG、BMP", 1, 0)
    IMAGE_SIZE_ERROR = (216202, "image size error", "上传的图片大小错误分辨率不高于4096*4096", 1, 0)
    IMAGE_SIZE_BASE_ERROR = (216203, "image size error", "上传的图片编码有误", 1, 0)
    RECOGNIZE_ERROR = (216630, "recognize error", "识别错误", 2, 2)
    DETECT_ERROR = (216634, "detect error", "检测错误", 2, 2)
    MISSING_PARAMETERS = (282003, "missing parameters: {参数名}", "请求参数缺失", 0, 0)
    BATCH_ROCESSING_ERROR = (282005, "batch processing error", "处理批量任务时发生部分或全部错误", 0, 5)
    BATCH_TASK_LIMIT_REACHED = (282006, "batch task limit reached", "批量任务处理数量超出限制请将任务数量减少到10或10以下", 1, 5)
    IMAGE_TRANSCODE_ERROR = (282100, "image transcode error", "图片压缩转码错误", 0, 1)
    IMAGE_SPLIT_LIMIT_REACHED = (282101, "image split limit reached", "长图片切分数量超限!", 1, 1)
    TARGET_DETECT_ERROR = (282102, "target detect error", "未检测到图片中识别目标!", 2, 1)
    TARGET_RECOGNIZE_ERROR = (282103, "target recognize error", "图片目标识别错误!", 2, 1)
    URLS_NOT_EXIT = (282110, "urls not exit", "URL参数不存在请核对URL后再次提交!", 1, 0)
    URL_FORMAT_ILLEGAL = (282111, "url format illegal", "URL格式非法!", 1, 0)
    # fixed: message previously duplicated URL_FORMAT_ILLEGAL's "URL格式非法!"
    URL_DOWNLOAD_TIMEOUT = (282112, "url download timeout", "URL下载超时!", 1, 0)
    URL_RESPONSE_INVALID = (282113, "url response invalid", "URL返回无效参数!", 1, 0)
    URL_SIZE_ERROR = (282114, "url size error", "URL长度超过1024字节或为0!", 1, 0)
    REQUEST_ID_NOT_EXIST = (282808, "request id: xxxxx not exist", "request id xxxxx 不存在", 0, 0)
    RESULT_TYPE_ERROR = (282809, "result type error", "返回结果请求错误不属于excel或json", 0, 0)
    IMAGE_RECOGNIZE_ERROR = (282810, "image recognize error", "图像识别错误", 2, 1)
    INVALID_ARGUMENT = (283300, "Invalid argument", "入参格式有误,可检查下图片编码、代码格式是否有误", 1, 0)
    INTERNAL_ERROR_2 = (336000, "Internal error", "服务器内部错误", 0, 0)
    INVALID_ARGUMENT_2 = (336001, "Invalid Argument", "入参格式有误,比如缺少必要参数、图片编码错误等等,可检查下图片编码、代码格式是否有误", 0, 0)
    SDK_IMAGE_SIZE_ERROR = ('SDK100', "image size error", "图片大小超限最短边至少50px最长边最大4096px 建议长宽比31以内图片请求格式支持PNG、JPG、BMP", 1, 0)
    SDK_IMAGE_LENGTH_ERROR = ('SDK101', "image length error", "图片边长不符合要求最短边至少50px最长边最大4096px 建议长宽比31以内", 1, 0)
    SDK_READ_IMAGE_FILE_ERROR = ('SDK102', "read image file error", "读取图片文件错误", 0, 1)
    SDK_CONNECTION_OR_READ_DATA_TIME_OUT = ('SDK108', "connection or read data time out", "连接超时或读取数据超时,请检查本地网络设置、文件读取设置", 0, 3)
    SDK_UNSUPPORTED_IMAGE_FORMAT = ('SDK109', "unsupported image format", "不支持的图片格式当前支持以下几类图片PNG、JPG、BMP", 1, 0)
# Lookup table: Baidu error code (value[0]) -> enum member.
# Every member's code is unique, so a comprehension over the enum reproduces
# the original hand-maintained 49-entry dict exactly and can never drift out
# of sync when members are added or removed.
BAIDUERRORDATA = {member.value[0]: member for member in BaiduSdkErrorEnum}
@unique
class VehicleEnum(Enum):
    """Vehicle detection categories: (identifier, Chinese label, class index)."""
    CAR = ("car", "小汽车", 0)
    TRICYCLE = ("tricycle", "三轮车", 1)
    MOTORBIKE = ("motorbike", "摩托车", 2)
    CARPLATE = ("carplate", "车牌", 3)
    TRUCK = ("truck", "卡车", 4)
    BUS = ("bus", "巴士", 5)
# Lookup table: identifier string (value[0]) -> VehicleEnum member.
# Identifiers are unique, so the comprehension matches the original
# hand-written mapping entry for entry.
VehicleEnumVALUE = {member.value[0]: member for member in VehicleEnum}

86
enums/ExceptionEnum.py Normal file
View File

@ -0,0 +1,86 @@
from enum import Enum, unique
# Service exception enum: (error code, user-facing Chinese message)
@unique
class ExceptionType(Enum):
    """Service error codes paired with user-facing messages.

    value[0] is the "SPxxx" error code, value[1] the Chinese message.
    NOTE(review): SP030 is unassigned and SP993+ jump ahead — presumably
    reserved/system ranges; confirm before reusing those codes.
    """
    OR_VIDEO_ADDRESS_EXCEPTION = ("SP000", "未拉取到视频流, 请检查拉流地址是否有视频流!")
    ANALYSE_TIMEOUT_EXCEPTION = ("SP001", "AI分析超时!")
    PULLSTREAM_TIMEOUT_EXCEPTION = ("SP002", "原视频拉流超时!")
    READSTREAM_TIMEOUT_EXCEPTION = ("SP003", "原视频读取视频流超时!")
    GET_VIDEO_URL_EXCEPTION = ("SP004", "获取视频播放地址失败!")
    GET_VIDEO_URL_TIMEOUT_EXCEPTION = ("SP005", "获取原视频播放地址超时!")
    PULL_STREAM_URL_EXCEPTION = ("SP006", "拉流地址不能为空!")
    PUSH_STREAM_URL_EXCEPTION = ("SP007", "推流地址不能为空!")
    PUSH_STREAM_TIME_EXCEPTION = ("SP008", "未生成本地视频地址!")
    AI_MODEL_MATCH_EXCEPTION = ("SP009", "未匹配到对应的AI模型!")
    ILLEGAL_PARAMETER_FORMAT = ("SP010", "非法参数格式!")
    PUSH_STREAMING_CHANNEL_IS_OCCUPIED = ("SP011", "推流通道可能被占用, 请稍后再试!")
    VIDEO_RESOLUTION_EXCEPTION = ("SP012", "不支持该分辨率类型的视频,请切换分辨率再试!")
    READ_IAMGE_URL_EXCEPTION = ("SP013", "未能解析图片地址!")
    DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED = ("SP014", "不支持该类型的检测目标!")
    WRITE_STREAM_EXCEPTION = ("SP015", "写流异常!")
    OR_VIDEO_DO_NOT_EXEIST_EXCEPTION = ("SP016", "原视频不存在!")
    MODEL_LOADING_EXCEPTION = ("SP017", "模型加载异常!")
    MODEL_ANALYSE_EXCEPTION = ("SP018", "算法模型分析异常!")
    AI_MODEL_CONFIG_EXCEPTION = ("SP019", "模型配置不能为空!")
    AI_MODEL_GET_CONFIG_EXCEPTION = ("SP020", "获取模型配置异常, 请检查模型配置是否正确!")
    MODEL_GROUP_LIMIT_EXCEPTION = ("SP021", "模型组合个数超过限制!")
    MODEL_NOT_SUPPORT_VIDEO_EXCEPTION = ("SP022", "%s不支持视频识别!")
    MODEL_NOT_SUPPORT_IMAGE_EXCEPTION = ("SP023", "%s不支持图片识别!")
    THE_DETECTION_TARGET_CANNOT_BE_EMPTY = ("SP024", "检测目标不能为空!")
    URL_ADDRESS_ACCESS_FAILED = ("SP025", "URL地址访问失败, 请检测URL地址是否正确!")
    UNIVERSAL_TEXT_RECOGNITION_FAILED = ("SP026", "识别失败!")
    COORDINATE_ACQUISITION_FAILED = ("SP027", "飞行坐标识别异常!")
    PUSH_STREAM_EXCEPTION = ("SP028", "推流异常!")
    MODEL_DUPLICATE_EXCEPTION = ("SP029", "存在重复模型配置!")
    DETECTION_TARGET_NOT_SUPPORT = ("SP031", "存在不支持的检测目标!")
    TASK_EXCUTE_TIMEOUT = ("SP032", "任务执行超时!")
    PUSH_STREAM_URL_IS_NULL = ("SP033", "拉流、推流地址不能为空!")
    PULL_STREAM_NUM_LIMIT_EXCEPTION = ("SP034", "转推流数量超过限制!")
    NOT_REQUESTID_TASK_EXCEPTION = ("SP993", "未查询到该任务,无法停止任务!")
    NO_RESOURCES = ("SP995", "服务器暂无资源可以使用请稍后30秒后再试!")
    NO_CPU_RESOURCES = ("SP996", "暂无CPU资源可以使用请稍后再试!")
    SERVICE_COMMON_EXCEPTION = ("SP997", "公共服务异常!")
    NO_GPU_RESOURCES = ("SP998", "暂无GPU资源可以使用请稍后再试!")
    SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常!")

747
enums/ModelTypeEnum.py Normal file
View File

@ -0,0 +1,747 @@
import sys
from enum import Enum, unique
from common.Constant import COLOR
sys.path.extend(['..', '../AIlib2'])
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo
from segutils.segmodel import SegModel
from utilsK.queRiver import riverDetSegMixProcess
from utilsK.crowdGather import gather_post_process
from segutils.trafficUtils import tracfficAccidentMixFunction
from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.illParkingUtils import illParking_postprocess
from stdc import stdcModel
from yolov5 import yolov5Model
from DMPRUtils.jointUtil import dmpr_yolo_stdc
from AI import default_mix
from ocr import ocrModel
from utilsK.channel2postUtils import channel2_post_process
'''
参数说明
1. 编号
2. 模型编号
3. 模型名称
4. 选用的模型名称
5. 模型配置
6. 模型引用配置[Detweights文件, Segweights文件, 引用计数]
'''
@unique
class ModelType(Enum):
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', lambda device, gpuName: {
'device': device,
'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"],
'seg_nclass': 2,
'trtFlag_seg': True,
'trtFlag_det': True,
'segRegionCnt': 1,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [5, 6, 7],
'riverIou': 0.1
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/river/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../AIlib2/weights/river/stdc_360X640_%s_fp16.engine' % gpuName
})
# FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: {
# 'device': device,
# 'gpu_name': gpuName,
# 'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾","云朵"],
# 'trtFlag_det': True,
# 'trtFlag_seg': False,
# 'Detweights': "../AIlib2/weights/forest2/yolov5_%s_fp16.engine" % gpuName,
# 'seg_nclass': 2,
# 'segRegionCnt': 0,
# 'slopeIndex': [],
# 'segPar': None,
# 'postFile': {
# "name": "post_process",
# "conf_thres": 0.25,
# "iou_thres": 0.45,
# "classes": 6,
# "rainbows": COLOR
# },
# 'Segweights': None
# })
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: {
'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾","云朵"],
'postProcess':{'function':default_mix,'pars':{}},
'models':
[
{
'weight':"../AIlib2/weights/forest2/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
"pixScale": 1.2,
})
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: {
'device': str(device),
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "影子", "事故"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 3,
'segRegionCnt': 2,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': tracfficAccidentMixFunction,
'pars': {
'modelSize': (640, 360),
#'modelSize': (1920,1080),
'RoadArea': 16000,
'roadVehicleAngle': 15,
'speedRoadVehicleAngleMax': 75,
'roundness': 1.0,
'cls': 9,
'vehicleFactor': 0.1,
'confThres': 0.25,
'roadIou': 0.6,
'radius': 50,
'vehicleFlag': False,
'distanceFlag': False
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 10,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/highWay2/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../AIlib2/weights/highWay2/stdc_360X640_%s_fp16.engine' % gpuName
})
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, None)
PLATE_MODEL = ("5", "005", "车牌模型", None, None)
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["车辆"],
'seg_nclass': 2,
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/vehicle/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["行人"],
'seg_nclass': 2,
'segRegionCnt': 0,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine" % gpuName,
'slopeIndex': [],
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["火焰", "烟雾"],
'seg_nclass': 2, # 分割模型类别数目默认2类
'segRegionCnt': 0,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/smogfire/yolov5_%s_fp16.engine" % gpuName,
'slopeIndex': [],
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["钓鱼", "游泳"],
'seg_nclass': 2, # 分割模型类别数目默认2类
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["违法种植"],
'seg_nclass': 2, # 分割模型类别数目默认2类
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})
SHIP_MODEL = ("11", "011", "船只模型", 'ship2', lambda device, gpuName: {
'model_size': (608, 608),
'K': 100,
'conf_thresh': 0.18,
'device': 'cuda:%s' % device,
'down_ratio': 4,
'num_classes': 15,
'weights': '../AIlib2/weights/ship2/obb_608X608_%s_fp16.engine' % gpuName,
'dataset': 'dota',
'half': False,
'mean': (0.5, 0.5, 0.5),
'std': (1, 1, 1),
'heads': {'hm': None, 'wh': 10, 'reg': 2, 'cls_theta': 1},
'decoder': None,
'test_flag': True,
"rainbows": COLOR,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'drawBox': False,
'label_array': None,
'labelnames': ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"),
})
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, None)
CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': [""],
'seg_nclass': 2, # 分割模型类别数目默认2类
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})
RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2', lambda device, gpuName: {
'device': device,
'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只",
"蓝藻"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 2,
'segRegionCnt': 1,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [1, 3, 4, 7],
'riverIou': 0.1
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.3,
"ovlap_thres_crossCategory": 0.65,
"classes": 5,
"rainbows": COLOR
},
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
'Detweights': "../AIlib2/weights/river2/yolov5_%s_fp16.engine" % gpuName,
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3]
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName
})
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
'labelnames': ["车辆", "垃圾", "商贩", "违停"],
'postProcess':{
'function':dmpr_yolo_stdc,
'pars':{'carCls':0 ,'illCls':3,'scaleRatio':0.5,'border':80}
},
'models':[
{
#'weight':'../AIlib2/weights/conf/cityMangement3/yolov5.pt',
'weight':'../AIlib2/weights/cityMangement3/yolov5_%s_fp16.engine'%(gpuName),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.8,"1":0.4,"2":0.5,"3":0.5 } }
},
{
'weight':'../AIlib2/weights/conf/cityMangement3/dmpr.pth',
'par':{
'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.1, 'dmprimg_size':640,
'name':'dmpr'
},
'model':DMPRModel,
'name':'dmpr'
},
{
'weight':'../AIlib2/weights/conf/cityMangement3/stdc_360X640.pth',
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
"pixScale": 1.2,
})
DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {
'device': device,
'labelnames': ["人头", "", "船只"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 2,
'segRegionCnt': 2,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': mixDrowing_water_postprocess,
'pars': {
'modelSize': (640, 360)
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
'Detweights': "../AIlib2/weights/drowning/yolov5_%s_fp16.engine" % gpuName,
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3]
'Segweights': '../AIlib2/weights/drowning/stdc_360X640_%s_fp16.engine' % gpuName
})
NOPARKING_MODEL = (
"18", "018", "城市违章模型", 'noParking', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "违停"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True, ###分割模型预处理参数
'mixFunction': {
'function': mixNoParking_road_postprocess,
'pars': {
'modelSize': (640, 360),
'roundness': 0.3,
'cls': 9,
'laneArea': 10,
'laneAngleCha': 5,
'RoadArea': 16000,
'fitOrder':2
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/noParking/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../AIlib2/weights/noParking/stdc_360X640_%s_fp16.engine' % gpuName
})
ILLPARKING_MODEL = ("19", "019", "车辆违停模型", 'illParking', lambda device, gpuName: {
'device': device,
'labelnames': ["", "T角点", "L角点", "违停"],
'trtFlag_seg': False,
'trtFlag_det': True,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'mixFunction': {
'function': illParking_postprocess,
'pars': {}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/illParking/yolov5_%s_fp16.engine" % gpuName,
'Segweights': None
})
CITYROAD_MODEL = ("20", "020", "城市公路模型", 'cityRoad', lambda device, gpuName: {
'device': device,
'labelnames': ["护栏", "交通标志", "非交通标志", "施工", "施工"],
'trtFlag_seg': False,
'trtFlag_det': True,
'slopeIndex': [],
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.8,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/cityRoad/yolov5_%s_fp16.engine" % gpuName,
'Segweights': None
})
POTHOLE_MODEL = ("23", "023", "坑槽检测模型", 'pothole', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["坑槽"],
'seg_nclass': 2, # 分割模型类别数目默认2类
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/pothole/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None,
})
# Comprehensive ship-detection model: a YOLOv5 detector plus a CRNN OCR model
# that reads ship names; channel2_post_process merges/derives the final classes.
CHANNEL2_MODEL = ("24", "024", "船只综合检测模型", 'channel2', lambda device, gpuName: {
    'device': device,
    'gpu_name': gpuName,
    'labelnames': ["国旗", "浮标", "船名", "船只","未挂国旗船只"],
    'segRegionCnt': 0,
    # smallId/bigId/newId: flag (0) found on a ship (3) produces/derives class 4 ("未挂国旗船只")
    # — presumed from the id/label mapping above; confirm against channel2_post_process.
    'postProcess':{'function':channel2_post_process,'name':'channel2','pars':{
        'objs':[2],
        'wRation':1/6.0,
        'hRation':1/6.0,
        'smallId':0,
        'bigId':3,
        'newId':4,
        'recScale':1.2}},
    'models':[
        {
            #'weight':'../AIlib2/weights/conf/channel2/yolov5.pt',
            'weight':'../AIlib2/weights/channel2/yolov5_%s_fp16.engine'%(gpuName),
            'name':'yolov5',
            'model':yolov5Model,
            'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.1,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.7,"1":0.7,"2":0.8,"3":0.6} }
        },
        {
            # 'weight' : '../AIlib2/weights/ocr2/crnn_ch_4090_fp16_192X32.engine',
            'weight' : '../AIlib2/weights/conf/ocr2/crnn_ch.pth',
            'name':'ocr',
            'model':ocrModel,
            'par':{
                'char_file':'../AIlib2/weights/conf/ocr2/benchmark.txt',
                'mode':'ch',          # Chinese character set
                'nc':3,
                'imgH':32,            # OCR input size: 192x32
                'imgW':192,
                'hidden':256,
                'mean':[0.5,0.5,0.5],
                'std':[0.5,0.5,0.5],
                'dynamic':False,
            },
        }
    ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3]],
    'segPar': None,
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'Segweights': None,
})
# River inspection model (variant "T"): detection + segmentation, fused by
# riverDetSegMixProcess.
RIVERT_MODEL = ("25", "025", "河道检测模型(T)", 'riverT', lambda device, gpuName: {
    'device': device,
    'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只",
                   "蓝藻"],
    'trtFlag_seg': True,
    'trtFlag_det': True,
    'seg_nclass': 2,
    'segRegionCnt': 1,
    'segPar': {
        'modelSize': (640, 360),
        'mean': (0.485, 0.456, 0.406),   # ImageNet normalization constants
        'std': (0.229, 0.224, 0.225),
        'numpy': False,
        'RGB_convert_first': True,
        'mixFunction': {
            'function': riverDetSegMixProcess,
            'pars': {
                'slopeIndex': [1, 3, 4, 7],
                'riverIou': 0.1
            }
        }
    },
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.3,
        "ovlap_thres_crossCategory": 0.65,
        "classes": 5,
        "rainbows": COLOR
    },
    # "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
    'Detweights': "../AIlib2/weights/riverT/yolov5_%s_fp16.engine" % gpuName,
    # '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3]
    'Segweights': '../AIlib2/weights/riverT/stdc_360X640_%s_fp16.engine' % gpuName
})
# Forest crowd model: forest detector extended with a crowd/gathering post-process.
# NOTE(review): first tuple element is "2" while the model code is "026" — other
# members keep these numbers in sync; confirm "2" is intentional.
FORESTCROWD_FARM_MODEL = ("2", "026", "森林人群模型", 'forestCrowd', lambda device, gpuName: {
    'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾","人群"],
    # gather_post_process derives class 5 ("人群") when >= crowdThreshold pedestrians (class 2) cluster
    'postProcess':{'function':gather_post_process,'pars':{'pedestrianId':2,'crowdThreshold':4,'gatherId':5,'distancePersonScale':2.0}},
    'models':
        [
            {
                'weight':"../AIlib2/weights/forestCrowd/yolov5_%s_fp16.engine"%(gpuName),### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{ "0":0.25,"1":0.25,"2":0.6,"3":0.6,'4':0.6 ,'5':0.6 } },
            }
        ],
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],### controls which detection classes are displayed/output
    'segRegionCnt':2,### number of contour lines kept from the segmentation result
    "pixScale": 1.2,
})
# Traffic model for the Big Data Bureau: reuses the highWay2 engines with
# accident-analysis post-processing.
TRAFFICFORDSJ_FARM_MODEL = ("27", "027", "交通模型-大数据局", 'highWay2T', lambda device, gpuName: {
    'device': str(device),
    'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "影子", "事故", "桥梁外观","设施破损缺失","龙门架","防抛网","标识牌损坏","护栏损坏","钢筋裸露" ],
    'trtFlag_seg': True,
    'trtFlag_det': True,
    'seg_nclass': 3,
    'segRegionCnt': 2,
    'segPar': {
        'modelSize': (640, 360),
        'mean': (0.485, 0.456, 0.406),
        'std': (0.229, 0.224, 0.225),
        'predResize': True,
        'numpy': False,
        'RGB_convert_first': True,
        'mixFunction': {
            'function': tracfficAccidentMixFunction,
            'pars': {
                'modelSize': (640, 360),
                #'modelSize': (1920,1080),
                'RoadArea': 16000,
                'roadVehicleAngle': 15,
                'speedRoadVehicleAngleMax': 75,
                'roundness': 1.0,
                'cls': 9,
                'vehicleFactor': 0.1,
                'confThres': 0.25,
                'roadIou': 0.6,
                'radius': 50,
                'vehicleFlag': False,
                'distanceFlag': False
            }
        }
    },
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.25,
        "classes": 10,
        "rainbows": COLOR
    },
    'Detweights': "../AIlib2/weights/highWay2/yolov5_%s_fp16.engine" % gpuName,
    'Segweights': '../AIlib2/weights/highWay2/stdc_360X640_%s_fp16.engine' % gpuName
})
@staticmethod
def checkCode(code):
    """Return True if *code* equals the model code (``value[1]``) of any ModelType member."""
    return any(member.value[1] == code for member in ModelType)
'''
参数1: 检测目标名称
参数2: 检测目标
参数3: 初始化百度检测客户端
'''
@unique
class BaiduModelTarget(Enum):
    # Member layout: (display name, target index,
    #                 dispatcher(client0, client1, url, request_id) -> Baidu API result).
    # client0 handles vehicle APIs, client1 handles human-body APIs.
    VEHICLE_DETECTION = (
        "车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id))
    HUMAN_DETECTION = (
        "人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id))
    PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id))
# Lookup table: detection-target index (value[1]) -> BaiduModelTarget member.
BAIDU_MODEL_TARGET_CONFIG = {target.value[1]: target for target in BaiduModelTarget}
# Epidemic-prevention code kinds: 1 = itinerary code, 2 = health code.
EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}
# Model analysis mode.
@unique
class ModelMethodTypeEnum(Enum):
    # Mode 1: plain per-frame recognition (no tracking)
    NORMAL = 1
    # Mode 2: tracking-based recognition
    TRACE = 2

762
enums/ModelTypeEnum2.py Normal file
View File

@ -0,0 +1,762 @@
import sys
from enum import Enum, unique
from common.Constant import COLOR
sys.path.extend(['..', '../AIlib2'])
from segutils.segmodel import SegModel
from utilsK.queRiver import riverDetSegMixProcess_N
from segutils.trafficUtils import tracfficAccidentMixFunction_N
from utilsK.drownUtils import mixDrowing_water_postprocess_N
from utilsK.noParkingUtils import mixNoParking_road_postprocess_N
from utilsK.illParkingUtils import illParking_postprocess
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo
from yolov5 import yolov5Model
from stdc import stdcModel
from AI import default_mix
from DMPRUtils.jointUtil import dmpr_yolo_stdc
'''
参数说明
1. 编号
2. 模型编号
3. 模型名称
4. 选用的模型名称
'''
@unique
class ModelType2(Enum):
# River model: YOLOv5 detector + STDC segmentation, fused by riverDetSegMixProcess_N.
# Tuple layout: (index, model code, display name, business key, factory(device, gpuName) -> config).
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', lambda device, gpuName: {
    'device': device,
    'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7] ],### controls which detection classes are displayed/output
    'trackPar': {
        'sort_max_age': 2,  # max frames a target may vanish before its track is broken and it is treated as new
        'sort_min_hits': 3,  # consecutive detections required before a track counts as a real target
        'sort_iou_thresh': 0.2,  # minimum association confidence for tracking
        'det_cnt': 10,  # run detection/tracking every N frames (default 10)
        'windowsize': 29,  # trajectory smoothing window; must be odd (default 29)
        'patchCnt': 100,  # frames fed per batch; should not be fewer than 100
    },
    'postProcess':{'function':riverDetSegMixProcess_N,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}}, # function fusing segmentation and detection results
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 80,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    },
    'models':
        [
            {
                'weight':"../AIlib2/weights/river/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{
                    'half':True,
                    'device':'cuda:0' ,
                    'conf_thres':0.25,
                    'iou_thres':0.45,
                    'allowedList':[0,1,2,3],
                    'segRegionCnt':1,
                    'trtFlag_det':False,
                    'trtFlag_seg':False,
                    "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            },
            {
                'weight':'../AIlib2/weights/conf/river/stdc_360X640.pth',
                'par':{
                    'modelSize':(640,360),
                    'mean':(0.485, 0.456, 0.406),
                    'std' :(0.229, 0.224, 0.225),
                    'numpy':False,
                    'RGB_convert_first':True,
                    'seg_nclass':2},### segmentation model preprocessing parameters
                'model':stdcModel,
                'name':'stdc'
            }
        ],
})
# Forest model: single YOLOv5 detector, default post-processing.
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: {
    'device': device,
    'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾"],
    'models':
        [
            {
                'weight':"../AIlib2/weights/forest2/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,
                        'device':'cuda:0' ,
                        'conf_thres':0.25,
                        'iou_thres':0.45,
                        'allowedList':[0,1,2,3],
                        'segRegionCnt':1,
                        'trtFlag_det':False,
                        'trtFlag_seg':False,
                        "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 }
                        },
            }
        ],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 80,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    }
})
# Traffic model: YOLOv5 detector + STDC segmentation, fused by the
# traffic-accident post-process. Tuple layout: (index, model code,
# display name, business key, factory(device, gpuName) -> config).
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: {
    'device': device,
    'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "事故"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':5,'windowsize':29,'patchCnt':100},
    'postProcess':{
        'function':tracfficAccidentMixFunction_N,
        'pars':{
            'RoadArea': 16000,
            'vehicleArea': 10,
            'roadVehicleAngle': 15,
            'speedRoadVehicleAngleMax': 75,
            'radius': 50,
            'roundness': 1.0,
            # FIX: 'cls' was listed twice in the original literal (both 9);
            # the duplicate key was silently dropped by Python — kept once.
            'cls': 9,
            'vehicleFactor': 0.1,
            'confThres':0.25,
            'roadIou':0.6,
            'vehicleFlag':False,
            'distanceFlag': False,
            'modelSize':(640,360),
        }
    },
    'models':
        [
            {
                'weight':"../AIlib2/weights/highWay2/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{
                    'half':True,
                    'device':'cuda:0' ,
                    'conf_thres':0.25,
                    'iou_thres':0.45,
                    'allowedList':[0,1,2,3],
                    'segRegionCnt':1,
                    'trtFlag_det':False,
                    'trtFlag_seg':False,
                    "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            },
            {
                'weight':'../AIlib2/weights/conf/highWay2/stdc_360X640.pth',
                'par':{
                    'modelSize':(640,360),
                    'mean':(0.485, 0.456, 0.406),
                    'std' :(0.229, 0.224, 0.225),
                    'predResize':True,
                    'numpy':False,
                    'RGB_convert_first':True,
                    'seg_nclass':3},### segmentation model preprocessing parameters
                'model':stdcModel,
                'name':'stdc'
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.25,
        "classes": 9,
        "rainbows": COLOR
    },
    'txtFontSize': 20,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'waterLineColor': (0, 255, 255),
        'segLineShow': False,
        'waterLineWidth': 2
    }
})
# Placeholder members: these businesses are handled outside the generic AI pipeline,
# so they carry no business key and no config factory.
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, None)
PLATE_MODEL = ("5", "005", "车牌模型", None, None)
# Vehicle model: single YOLOv5 detector, default post-processing.
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', lambda device, gpuName: {
    'device': device,
    'labelnames': ["车辆"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/vehicle/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,
                        'device':'cuda:0' ,
                        'conf_thres':0.25,
                        'iou_thres':0.45,
                        'allowedList':[0,1,2,3],
                        'segRegionCnt':1,
                        'trtFlag_det':False,
                        'trtFlag_seg':False,
                        "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 40,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'waterLineColor': (0, 255, 255),
        'segLineShow': False,
        'waterLineWidth': 3
    }
})
# Pedestrian model: single YOLOv5 detector, default post-processing.
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', lambda device, gpuName: {
    'device': device,
    'labelnames': ["行人"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    }
})
# Smoke-and-fire model: single YOLOv5 detector, default post-processing.
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: {
    'device': device,
    'labelnames': ["烟雾", "火焰"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/smogfire/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                #'weight':'../AIlib2/weights/conf/%s/yolov5.pt'%(opt['business'] ),
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 40,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    }
})
# Angler/swimmer model: single YOLOv5 detector, default post-processing.
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: {
    'device': device,
    'labelnames': ["钓鱼", "游泳"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 40,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    },
})
# Rural-road model: single YOLOv5 detector, default post-processing.
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', lambda device, gpuName: {
    'device': device,
    'labelnames': ["违法种植"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 40,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    }
})
# Ship model: oriented-bounding-box (OBB) detector rather than the YOLOv5 pipeline.
SHIP_MODEL = ("11", "011", "船只模型", 'ship2', lambda device, gpuName: {
    'obbModelPar': {
        'labelnames': ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"],
        'model_size': (608, 608),
        'K': 100,   # maximum number of detections kept
        'conf_thresh': 0.3,
        'down_ratio': 4,
        'num_classes': 15,
        'dataset': 'dota',
        'heads': {
            'hm': None,
            'wh': 10,
            'reg': 2,
            'cls_theta': 1
        },
        'mean': (0.5, 0.5, 0.5),
        'std': (1, 1, 1),
        'half': False,
        'test_flag': True,
        'decoder': None,
        'weights': '../AIlib2/weights/ship2/obb_608X608_%s_fp16.engine' % gpuName
    },
    'trackPar': {
        'sort_max_age': 2,  # max frames a target may vanish before its track is broken and it is treated as new
        'sort_min_hits': 3,  # consecutive detections required before a track counts as a real target
        'sort_iou_thresh': 0.2,  # minimum association confidence for tracking
        'det_cnt': 10,  # run detection/tracking every N frames (default 10)
        'windowsize': 29,  # trajectory smoothing window; must be odd (default 29)
        'patchCnt': 100,  # frames fed per batch; should not be fewer than 100
    },
    'device': "cuda:%s" % device,
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'drawBox': False,
    'drawPar': {
        "rainbows": COLOR,
        'digitWordFont': {
            'line_thickness': 2,
            'boxLine_thickness': 1,
            'wordSize': 40,
            'fontSize': 1.0,
            'label_location': 'leftTop'
        }
    },
    'labelnames': ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"]
})
# Baidu image recognition is dispatched via BaiduModelTarget2, not this pipeline.
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, None)
# Channel-emergency model: single YOLOv5 detector, default post-processing.
CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', lambda device, gpuName: {
    'device': device,
    'labelnames': [""],  # NOTE(review): single empty label — confirm this is intentional
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                #'weight':'../AIlib2/weights/conf/%s/yolov5.pt'%(opt['business'] ),
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],### controls which detection classes are displayed/output (empty: none)
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 40,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    }
})
# River model v2: YOLOv5 detector + STDC segmentation, fused by riverDetSegMixProcess_N.
RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2', lambda device, gpuName: {
    'device': device,
    'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只",
                   "蓝藻"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':riverDetSegMixProcess_N,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}}, # function fusing segmentation and detection results
    'models':
        [
            {
                'weight':"../AIlib2/weights/river2/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            },
            {
                'weight':'../AIlib2/weights/conf/river2/stdc_360X640.pth',
                'par':{
                    'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},### segmentation model preprocessing parameters
                'model':stdcModel,
                'name':'stdc'
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.3,
        "ovlap_thres_crossCategory": 0.65,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 80,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    }
})
# City-management model: YOLOv5 detector + DMPR parking-slot model + STDC
# segmentation, fused by dmpr_yolo_stdc.
# NOTE(review): business key is 'cityMangement2' but every weight path points at
# cityMangement3 — confirm which generation of weights is intended.
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: {
    'device': device,
    'labelnames': ["车辆", "垃圾", "商贩", "违停"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':5,'windowsize':29,'patchCnt':100},
    'postProcess':{
        'function':dmpr_yolo_stdc,
        'pars':{'carCls':0 ,'illCls':3,'scaleRatio':0.5,'border':80}
    },
    'models':[
        {
            'weight':"../AIlib2/weights/cityMangement3/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
            'name':'yolov5',
            'model':yolov5Model,
            'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.8,"1":0.5,"2":0.5,"3":0.5 } }
        },
        {
            'weight':"../AIlib2/weights/cityMangement3/dmpr_%s.engine"% gpuName,### DMPR model path
            'par':{
                'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.3, 'dmprimg_size':640,
                'name':'dmpr'
            },
            'model':DMPRModel,
            'name':'dmpr'
        },
        {
            'weight':"../AIlib2/weights/cityMangement3/stdc_360X640_%s_fp16.engine"% gpuName,### segmentation model path
            'par':{
                'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},### segmentation model preprocessing parameters
            'model':stdcModel,
            'name':'stdc'
        }
    ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 20,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 2
    }
})
# Drowning model: YOLOv5 detector + STDC water segmentation, fused by
# mixDrowing_water_postprocess_N.
DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {
    'device': device,
    'labelnames': ["人头", "", "船只"],  # NOTE(review): second label is empty — confirm class 1 is intentionally unnamed
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':mixDrowing_water_postprocess_N,
                   'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/drowning/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            },
            {
                'weight':'../AIlib2/weights/conf/drowning/stdc_360X640.pth',
                'par':{
                    'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},### segmentation model preprocessing parameters
                'model':stdcModel,
                'name':'stdc'
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.25,
        "classes": 9,
        "rainbows": COLOR
    },
    'txtFontSize': 20,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'waterLineColor': (0, 255, 255),
        'segLineShow': False,
        'waterLineWidth': 2
    }
})
# Urban illegal-parking model: YOLOv5 detector + STDC lane segmentation, fused by
# mixNoParking_road_postprocess_N.
NOPARKING_MODEL = (
    "18", "018", "城市违章模型", 'noParking', lambda device, gpuName: {
        'device': device,
        'labelnames': ["车辆", "违停"],
        'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
        'postProcess':{'function':mixNoParking_road_postprocess_N,
                       'pars': { 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5 ,'RoadArea': 16000,'fitOrder':2, 'modelSize':(640,360)}
                       } ,
        'models':
            [
                {
                    'weight':"../AIlib2/weights/noParking/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                    'name':'yolov5',
                    'model':yolov5Model,
                    'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
                },
                {
                    'weight':'../AIlib2/weights/conf/noParking/stdc_360X640.pth',
                    'par':{
                        'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':4},### segmentation model preprocessing parameters
                    'model':stdcModel,
                    'name':'stdc'
                }
            ],
        'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### controls which detection classes are displayed/output
        'postFile': {
            "name": "post_process",
            "conf_thres": 0.25,
            "iou_thres": 0.25,
            "classes": 9,
            "rainbows": COLOR
        },
        'txtFontSize': 20,
        'digitFont': {
            'line_thickness': 2,
            'boxLine_thickness': 1,
            'fontSize': 1.0,
            'waterLineColor': (0, 255, 255),
            'segLineShow': False,
            'waterLineWidth': 2
        }
    }
)
# Urban-road model: single YOLOv5 detector, default post-processing.
CITYROAD_MODEL = ("20", "020", "城市公路模型", 'cityRoad', lambda device, gpuName: {
    'device': device,
    # NOTE(review): "施工" appears twice — confirm whether the 5th class should be a different label
    'labelnames': ["护栏", "交通标志", "非交通标志", "施工", "施工"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/cityRoad/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.8,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.8,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 40,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    }
})
POTHOLE_MODEL = ("23", "023", "坑槽检测模型", 'pothole', lambda device, gpuName: { # currently folded into other models; not used standalone
    'device': device,
    'labelnames': ["坑槽"],
    'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':3,'windowsize':29,'patchCnt':100},
    'postProcess':{'function':default_mix,'pars':{ }},
    'models':
        [
            {
                'weight':"../AIlib2/weights/pothole/yolov5_%s_fp16.engine"% gpuName,### detection model (engine) path
                'name':'yolov5',
                'model':yolov5Model,
                'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3}},
            }
        ],
    'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0]],### controls which detection classes are displayed/output
    'postFile': {
        "name": "post_process",
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "classes": 5,
        "rainbows": COLOR
    },
    'txtFontSize': 40,
    'digitFont': {
        'line_thickness': 2,
        'boxLine_thickness': 1,
        'fontSize': 1.0,
        'segLineShow': False,
        'waterLineColor': (0, 255, 255),
        'waterLineWidth': 3
    },
})
@staticmethod
def checkCode(code):
    """Return True if *code* equals the model code (``value[1]``) of any ModelType2 member."""
    return any(member.value[1] == code for member in ModelType2)
'''
参数1: 检测目标名称
参数2: 检测目标
参数3: 初始化百度检测客户端
'''
@unique
class BaiduModelTarget2(Enum):
    # Member layout: (display name, target index,
    #                 dispatcher(client0, client1, url, request_id) -> Baidu API result).
    # client0 handles vehicle APIs, client1 handles human-body APIs.
    VEHICLE_DETECTION = (
        "车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id))
    HUMAN_DETECTION = (
        "人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id))
    PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id))
# Lookup table: detection-target index (value[1]) -> BaiduModelTarget2 member.
BAIDU_MODEL_TARGET_CONFIG2 = {target.value[1]: target for target in BaiduModelTarget2}
# Epidemic-prevention code kinds: 1 = itinerary code, 2 = health code.
EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}
# Model analysis mode.
@unique
class ModelMethodTypeEnum2(Enum):
    # Mode 1: plain per-frame recognition (no tracking)
    NORMAL = 1
    # Mode 2: tracking-based recognition
    TRACE = 2

View File

@ -0,0 +1,18 @@
from enum import Enum, unique
# Screen-recording task status enum; values are (status code string, Chinese description).
@unique
class RecordingStatus(Enum):
    RECORDING_WAITING = ("5", "待录制")      # waiting to start recording
    RECORDING_RETRYING = ("10", "重试中")    # retrying
    RECORDING_RUNNING = ("15", "录制中")     # recording in progress
    RECORDING_SUCCESS = ("20", "录制完成")   # recording finished
    RECORDING_TIMEOUT = ("25", "录制超时")   # recording timed out
    RECORDING_FAILED = ("30", "录制失败")    # recording failed

33
enums/StatusEnum.py Normal file
View File

@ -0,0 +1,33 @@
from enum import Enum, unique
@unique
class PushStreamStatus(Enum):
    # Push-stream task status; values are (status code int, Chinese description).
    WAITING = (5, "待推流")     # waiting to push
    RETRYING = (10, "重试中")   # retrying
    RUNNING = (15, "推流中")    # pushing
    STOPPING = (20, "停止中")   # stopping
    SUCCESS = (25, "完成")      # finished
    TIMEOUT = (30, "超时")      # timed out
    FAILED = (35, "失败")       # failed
@unique
class ExecuteStatus(Enum):
    # Generic task execution status; values are (status code int, Chinese description).
    WAITING = (5, "待执行")     # waiting to run
    RUNNING = (10, "执行中")    # running
    STOPPING = (15, "停止中")   # stopping
    SUCCESS = (20, "执行完成")  # finished
    TIMEOUT = (25, "超时")      # timed out
    FAILED = (30, "失败")       # failed

0
enums/__init__.py Normal file
View File

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
from loguru import logger
"""
自定义异常
"""
class ServiceException(Exception):  # custom business exception
    """Business-level exception carrying an error code and a formatted message.

    Args:
        code: application-specific error code.
        msg: message text, or a %-style format template when ``desc`` is given.
        desc: optional value(s) interpolated into ``msg`` via the ``%`` operator.
    """

    def __init__(self, code, msg, desc=None):
        self.code = code
        # When desc is provided, msg is treated as a %-format template.
        self.msg = msg if desc is None else msg % desc

    def __str__(self):
        # Keep the original side effect: log when the exception is rendered.
        logger.error("异常编码:{}, 异常描述:{}", self.code, self.msg)
        # BUG FIX: __str__ must return a str. The original returned None,
        # so str(exc) / logging the exception raised TypeError.
        return "code=%s, msg=%s" % (self.code, self.msg)

0
exception/__init__.py Normal file
View File

Binary file not shown.

Binary file not shown.

Binary file not shown.

0
font/__init__.py Normal file
View File

Some files were not shown because too many files have changed in this diff Show More