# -*- coding: utf-8 -*-
|
||
import base64
|
||
import os
|
||
from concurrent.futures import ThreadPoolExecutor
|
||
from os.path import join, exists, getsize
|
||
from time import time, sleep
|
||
from traceback import format_exc
|
||
import requests
|
||
import cv2
|
||
|
||
from multiprocessing import Process, Queue
|
||
|
||
import numpy as np
|
||
from loguru import logger
|
||
|
||
from common.Constant import init_progess, success_progess
|
||
from concurrency.FileUploadThread import ImageTypeImageFileUpload
|
||
from concurrency.HeartbeatThread import Heartbeat
|
||
|
||
from concurrency.PullVideoStreamProcess import OnlinePullVideoStreamProcess, OfflinePullVideoStreamProcess
|
||
from concurrency.PushVideoStreamProcess import OnPushStreamProcess, OffPushStreamProcess
|
||
|
||
from util.GPUtils import check_gpu_resource
|
||
from util.LogUtils import init_log
|
||
from concurrency.CommonThread import Common
|
||
from concurrency.PullStreamThread import RecordingPullStreamThread
|
||
from concurrency.RecordingHeartbeatThread import RecordingHeartbeat
|
||
from enums.AnalysisStatusEnum import AnalysisStatus
|
||
from enums.AnalysisTypeEnum import AnalysisType
|
||
from enums.ExceptionEnum import ExceptionType
|
||
from enums.ModelTypeEnum import ModelType
|
||
from enums.RecordingStatusEnum import RecordingStatus
|
||
from util.AliyunSdk import ThAliyunVodSdk
|
||
from util.MinioSdk import MinioSdk
|
||
from util.CpuUtils import check_cpu
|
||
from util.Cv2Utils import write_or_video, push_video_stream, close_all_p
|
||
from entity.FeedBack import message_feedback, recording_feedback
|
||
from exception.CustomerException import ServiceException
|
||
from util.ImageUtils import url2Array, add_water_pic
|
||
from util.ModelUtils import MODEL_CONFIG
|
||
from util.OcrBaiduSdk import OcrBaiduSdk
|
||
|
||
from enums.BaiduSdkEnum import VehicleEnumVALUE
|
||
from enums.ModelTypeEnum import BaiduModelTarget
|
||
from util.PlotsUtils import xywh2xyxy2
|
||
from util.QueUtil import put_queue, get_no_block_queue, clear_queue
|
||
from util.TimeUtils import now_date_to_str, YMDHMSF
|
||
from util.CpuUtils import print_cpu_status
|
||
import inspect
|
||
class IntelligentRecognitionProcess(Process):
    """Base process for stream-analysis tasks.

    Creates the queues shared between the pull/push/heartbeat workers and
    immediately reports the WAITING status to the feedback queue.
    """

    __slots__ = ('_fb_queue', '_msg', '_analyse_type', '_context', 'event_queue', '_pull_queue', '_hb_queue',
                 "_image_queue", "_push_queue", '_push_ex_queue')

    def __init__(self, *args):
        super().__init__()
        # Positional args: feedback queue, task message, analysis type, runtime context.
        self._fb_queue, self._msg, self._analyse_type, self._context = args
        # Event, pull (bounded to 10 frame batches), heartbeat, image,
        # push and push-exception queues.
        self.event_queue, self._pull_queue, self._hb_queue, self._image_queue, self._push_queue, self._push_ex_queue = \
            Queue(), Queue(10), Queue(), Queue(), Queue(), Queue()
        # Tell the caller the task is queued before any worker starts.
        put_queue(self._fb_queue, message_feedback(self._msg["request_id"], AnalysisStatus.WAITING.value,
                                                   self._analyse_type, progress=init_progess), timeout=2, is_ex=True)
        # 1 == MinIO storage, otherwise Aliyun VOD (see upload_video in subclasses).
        self._storage_source = self._context['service']['storage_source']
        self._algStatus = False

    def sendEvent(self, eBody):
        """Forward a command event (e.g. stop/algStart/algStop) to the run loop."""
        put_queue(self.event_queue, eBody, timeout=2, is_ex=True)

    def clear_queue(self):
        """Drain every inter-process queue so this process can exit cleanly."""
        for q in (self.event_queue, self._pull_queue, self._hb_queue,
                  self._image_queue, self._push_queue, self._push_ex_queue):
            clear_queue(q)

    @staticmethod
    def build_video_path(context, msg, is_build_or=True):
        """Compute the output video path(s) and store them in *context*.

        Args:
            context: shared runtime context; receives ``aiFilePath`` and,
                when *is_build_or* is true, ``orFilePath``.
            msg: task message providing ``request_id``.
            is_build_or: also build the original-video path (online mode).
        """
        random_time = now_date_to_str(YMDHMSF)
        pre_path = '%s/%s%s' % (context["base_dir"], context["video"]["file_path"], random_time)
        end_path = '%s%s' % (msg["request_id"], ".mp4")
        if is_build_or:
            context["orFilePath"] = '%s%s%s' % (pre_path, "_on_or_", end_path)
        context["aiFilePath"] = '%s%s%s' % (pre_path, "_on_ai_", end_path)

    @staticmethod
    def start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context):
        """Start the heartbeat reporter thread as a daemon and return it."""
        hb_thread = Heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
        # Thread.setDaemon() is deprecated since Python 3.10; set the attribute.
        hb_thread.daemon = True
        hb_thread.start()
        return hb_thread
|
||
|
||
|
||
|
||
|
||
|
||
class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
    """Real-time (online) stream analysis process."""

    __slots__ = ()

    @staticmethod
    def start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context):
        """Spawn the online push-stream worker as a daemon process and return it."""
        worker = OnPushStreamProcess(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
        worker.daemon = True
        worker.start()
        return worker
|
||
|
||
@staticmethod
|
||
def start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, frame_num):
|
||
pullProcess = OnlinePullVideoStreamProcess(msg, context, fb_queue, pull_queue, image_queue, analyse_type,
|
||
frame_num)
|
||
pullProcess.daemon = True
|
||
pullProcess.start()
|
||
return pullProcess
|
||
|
||
|
||
def upload_video(self,base_dir, env, request_id, orFilePath, aiFilePath):
|
||
if self._storage_source==1:
|
||
minioSdk = MinioSdk(base_dir, env, request_id )
|
||
upload_video_thread_or = Common(minioSdk.put_object, orFilePath, "or_online_%s.mp4" % request_id)
|
||
upload_video_thread_ai = Common(minioSdk.put_object, aiFilePath, "ai_online_%s.mp4" % request_id)
|
||
else:
|
||
aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
|
||
upload_video_thread_or = Common(aliyunVodSdk.get_play_url, orFilePath, "or_online_%s" % request_id)
|
||
upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
|
||
|
||
|
||
|
||
upload_video_thread_or.setDaemon(True)
|
||
upload_video_thread_ai.setDaemon(True)
|
||
upload_video_thread_or.start()
|
||
upload_video_thread_ai.start()
|
||
or_url = upload_video_thread_or.get_result()
|
||
ai_url = upload_video_thread_ai.get_result()
|
||
return or_url, ai_url
|
||
'''
|
||
@staticmethod
|
||
def upload_video(base_dir, env, request_id, orFilePath, aiFilePath):
|
||
aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
|
||
upload_video_thread_or = Common(aliyunVodSdk.get_play_url, orFilePath, "or_online_%s" % request_id)
|
||
upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
|
||
upload_video_thread_or.setDaemon(True)
|
||
upload_video_thread_ai.setDaemon(True)
|
||
upload_video_thread_or.start()
|
||
upload_video_thread_ai.start()
|
||
or_url = upload_video_thread_or.get_result()
|
||
ai_url = upload_video_thread_ai.get_result()
|
||
return or_url, ai_url
|
||
'''
|
||
|
||
@staticmethod
|
||
def ai_normal_dtection(model, frame, request_id):
|
||
model_conf, code = model
|
||
retResults = MODEL_CONFIG[code][3]([model_conf, frame, request_id])[0]
|
||
if type(retResults) is np.ndarray or len(retResults) == 0:
|
||
ret = retResults
|
||
if type(retResults) is np.ndarray:
|
||
ret = retResults.tolist()
|
||
else:
|
||
ret = retResults[2]
|
||
return code, ret
|
||
|
||
    @staticmethod
    def obj_det(self, model_array, frame, task_status, cframe, tt, request_id):
        """Run every model in *model_array* on *frame* and collect detections.

        NOTE: declared @staticmethod but receives the instance explicitly as
        its first argument; callers invoke it as ``t.submit(self.obj_det,
        self, ...)``.

        ``task_status`` is a shared 2-element list. ``task_status[1] == 1``
        means recent frames had detections, so every frame is analysed; when
        it drops to 0 only every 30th frame (``cframe % 30 == 0``) is probed
        until something is found again. *tt* is the inner thread pool used
        for the per-model fan-out.

        Returns:
            list of (model_code, detections) tuples; may be empty.
        """
        push_obj = []
        if task_status[1] == 1:
            # Active mode: fan each model out to the pool for this frame.
            dtection_result = []
            for model in model_array:
                result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
                dtection_result.append(result)
            for d in dtection_result:
                code, det_r = d.result()
                if len(det_r) > 0:
                    push_obj.append((code, det_r))
            # Nothing found in this frame: drop back to sampling mode.
            if len(push_obj) == 0:
                task_status[1] = 0
        if task_status[1] == 0:
            # Sampling mode: probe only one frame out of every 30.
            # NOTE(review): when active-mode detection just came back empty
            # and cframe % 30 == 0, the same frame is analysed a second time.
            if cframe % 30 == 0:
                dtection_result1 = []
                for model in model_array:
                    result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
                    dtection_result1.append(result)
                for d in dtection_result1:
                    code, det_r = d.result()
                    if len(det_r) > 0:
                        push_obj.append((code, det_r))
                # Detections reappeared: switch back to per-frame analysis.
                if len(push_obj) > 0:
                    task_status[1] = 1
        return push_obj
|
||
|
||
@staticmethod
|
||
def checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue, pull_queue,
|
||
request_id):
|
||
if time() - start_time > service_timeout:
|
||
logger.error("任务执行超时, requestId: {}", request_id)
|
||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
|
||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
|
||
if pull_process is not None and not pull_process.is_alive():
|
||
while True:
|
||
if pull_queue.empty() or pull_queue.qsize() == 0:
|
||
break
|
||
pull_result = get_no_block_queue(pull_queue)
|
||
if pull_result is not None and pull_result[0] == 1:
|
||
raise ServiceException(pull_result[1], pull_result[2])
|
||
logger.info("拉流进程异常停止, requestId: {}", request_id)
|
||
raise Exception("拉流进程异常停止!")
|
||
if hb_thread is not None and not hb_thread.is_alive():
|
||
logger.info("心跳线程异常停止, requestId: {}", request_id)
|
||
raise Exception("心跳线程异常停止!")
|
||
if push_process is not None and not push_process.is_alive():
|
||
while True:
|
||
if push_ex_queue.empty() or push_ex_queue.qsize() == 0:
|
||
break
|
||
push_result = get_no_block_queue(push_ex_queue)
|
||
if push_result is not None and push_result[0] == 1:
|
||
raise ServiceException(push_result[1], push_result[2])
|
||
logger.info("推流进程异常停止, requestId: {}", request_id)
|
||
raise Exception("推流进程异常停止!")
|
||
|
||
    def run(self):
        """Main loop of the real-time analysis task.

        Orchestrates the pull-stream process, push-stream process and
        heartbeat thread; feeds pulled frame batches through the models and
        forwards annotated results to the push queue.  Cleanup, video upload
        and final status feedback happen in the nested finally blocks.
        """
        msg, context, analyse_type = self._msg, self._context, self._analyse_type
        self.build_video_path(context, msg)
        request_id = msg["request_id"]
        base_dir, env = context["base_dir"], context["env"]
        service_timeout = int(context["service"]["timeout"])
        ex = None
        # Pull process, push process, heartbeat thread handles.
        pull_process, push_process, hb_thread = None, None, None
        # Event / pull / heartbeat / feedback queues.
        event_queue, pull_queue, hb_queue, fb_queue = self.event_queue, self._pull_queue, self._hb_queue, self._fb_queue
        # Push / push-exception / image queues.
        push_queue, push_ex_queue, image_queue = self._push_queue, self._push_ex_queue, self._image_queue
        try:
            # Set up logging inside the child process.
            init_log(base_dir, env)
            logger.info("开始启动实时分析进程!requestId: {}", request_id)
            # The pull process (pull thread, image-upload thread, mqtt reader)
            # is slow to initialise, so start it first.
            pull_process = self.start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, 25)
            # Heartbeat thread.
            hb_thread = self.start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
            # Load the model(s); get_model is defined elsewhere in this module.
            model_array = get_model(msg, context, analyse_type)
            # Push-stream process.
            push_process = self.start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
            # task_status[0]: models initialised? 0 = no, 1 = yes.
            # task_status[1]: detections currently present? 0 = no, 1 = yes.
            task_status = [0, 0]
            draw_config = {}
            start_time = time()
            # Two outer workers measured optimal for recognition throughput.
            with ThreadPoolExecutor(max_workers=2) as t:
                # A model combination holds at most 3 models, so the 2 outer
                # workers need up to 6 inner workers.
                with ThreadPoolExecutor(max_workers=6) as tt:
                    while True:
                        # Verify pull/push processes and heartbeat are alive.
                        self.checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue,
                                     pull_queue, request_id)
                        # Propagate any push-side error.
                        push_status = get_no_block_queue(push_ex_queue)
                        if push_status is not None and push_status[0] == 1:
                            raise ServiceException(push_status[1], push_status[2])
                        # Handle external commands.
                        event_result = get_no_block_queue(event_queue)
                        if event_result:
                            cmdStr = event_result.get("command")
                            # Algorithm on/off: relay to both workers.
                            if cmdStr in ['algStart', 'algStop']:
                                logger.info("发送向推流进程发送算法命令, requestId: {}, {}", request_id, cmdStr)
                                put_queue(push_queue, (2, cmdStr), timeout=1, is_ex=True)
                                pull_process.sendCommand({"command": cmdStr})
                            # Stop command: ask the pull process to wind down.
                            if "stop" == cmdStr:
                                logger.info("实时任务开始停止, requestId: {}", request_id)
                                pull_process.sendCommand({"command": 'stop'})
                        pull_result = get_no_block_queue(pull_queue)
                        if pull_result is None:
                            sleep(1)
                            continue
                        # (4, (frame_list, frame_index_list, all_frames)): a frame batch.
                        if pull_result[0] == 4:
                            frame_list, frame_index_list, all_frames = pull_result[1]
                            if len(frame_list) > 0:
                                # Lazily initialise models with the frame size.
                                if task_status[0] == 0:
                                    task_status[0] = 1
                                    for i, model in enumerate(model_array):
                                        model_conf, code = model
                                        model_param = model_conf[1]
                                        # (modeType, model_param, allowedList, names, rainbows)
                                        MODEL_CONFIG[code][2](frame_list[0].shape[1], frame_list[0].shape[0],
                                                              model_conf)
                                        if draw_config.get("font_config") is None:
                                            draw_config["font_config"] = model_param['font_config']
                                        if draw_config.get(code) is None:
                                            draw_config[code] = {}
                                            draw_config[code]["allowedList"] = model_conf[2]
                                            draw_config[code]["rainbows"] = model_conf[4]
                                            draw_config[code]["label_arrays"] = model_param['label_arraylist']
                                            if "label_dict" in model_param:
                                                draw_config[code]["label_dict"] = model_param['label_dict']
                                # Detect frames concurrently (2 workers fastest).
                                det_array = []
                                for i, frame in enumerate(frame_list):
                                    det_result = t.submit(self.obj_det, self, model_array, frame, task_status,
                                                          frame_index_list[i], tt, request_id)
                                    det_array.append(det_result)
                                push_objs = [det.result() for det in det_array]
                                put_queue(push_queue,
                                          (1, (frame_list, frame_index_list, all_frames, draw_config, push_objs)),
                                          timeout=2, is_ex=True)
                                del det_array, push_objs
                            del frame_list, frame_index_list, all_frames
                        elif pull_result[0] == 1:
                            # Pull-side error: stop both workers, then re-raise.
                            put_queue(push_queue, (2, 'stop_ex'), timeout=1, is_ex=True)
                            push_process.join(120)
                            pull_process.sendCommand({"command": 'stop'})
                            pull_process.join(120)
                            raise ServiceException(pull_result[1], pull_result[2])
                        elif pull_result[0] == 2:
                            # Normal end of stream: orderly shutdown.
                            put_queue(push_queue, (2, 'stop'), timeout=1, is_ex=True)
                            push_process.join(120)
                            pull_process.sendCommand({"command": 'stop'})
                            pull_process.join(120)
                            break
                        else:
                            raise Exception("未知拉流状态异常!")
        except ServiceException as s:
            logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            orFilePath, aiFilePath = context["orFilePath"], context["aiFilePath"]
            base_dir, env = context["base_dir"], context["env"]
            or_url, ai_url, exc = "", "", None
            try:
                # Stop the push process first (it consumes what pull produced).
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    logger.info("关闭推流进程, requestId:{}", request_id)
                    push_process.join(timeout=120)
                    logger.info("关闭推流进程1, requestId:{}", request_id)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    logger.info("关闭拉流进程, requestId:{}", request_id)
                    pull_process.join(timeout=120)
                    logger.info("关闭拉流进程1, requestId:{}", request_id)
                # Upload both videos when they exist and look non-trivial (>100 bytes).
                if exists(orFilePath) and exists(aiFilePath) and getsize(orFilePath) > 100:
                    or_url, ai_url = self.upload_video(base_dir, env, request_id, orFilePath, aiFilePath)
                    if or_url is None or ai_url is None:
                        logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
                                               ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
                # Stop the heartbeat thread.
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=1)
                    hb_thread.join(timeout=120)
                # Local files are no longer needed once uploaded.
                if exists(orFilePath):
                    logger.info("开始删除原视频, orFilePath: {}, requestId: {}", orFilePath, request_id)
                    os.remove(orFilePath)
                    logger.info("删除原视频成功, orFilePath: {}, requestId: {}", orFilePath, request_id)
                if exists(aiFilePath):
                    logger.info("开始删除AI视频, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                    os.remove(aiFilePath)
                    logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                # On failure report FAILED with whatever URLs were obtained.
                if ex:
                    code, msg = ex  # NOTE(review): rebinds the task message `msg` from above.
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         video_url=or_url,
                                                         ai_video_url=ai_url), timeout=2, is_ex=False)
                else:
                    # Success requires both play URLs to be present.
                    if or_url is None or len(or_url) == 0 or ai_url is None or len(ai_url) == 0:
                        raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
                                               ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.SUCCESS.value,
                                                         analyse_type,
                                                         progress=success_progess,
                                                         video_url=or_url,
                                                         ai_video_url=ai_url), timeout=2, is_ex=False)
            except ServiceException as s:
                logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
                exc = s.code, s.msg
            except Exception:
                logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
                exc = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
            finally:
                # Last-chance cleanup if the first shutdown attempt failed.
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    logger.info("关闭推流进程, requestId:{}", request_id)
                    push_process.join(timeout=120)
                    logger.info("关闭推流进程1, requestId:{}", request_id)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    logger.info("关闭拉流进程, requestId:{}", request_id)
                    pull_process.join(timeout=120)
                    logger.info("关闭拉流进程1, requestId:{}", request_id)
                if exc:
                    code, msg = exc
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         video_url=or_url,
                                                         ai_video_url=ai_url), timeout=2, is_ex=False)
                logger.info("清理队列, requestId:{}", request_id)
                self.clear_queue()
                logger.info("清理队列完成, requestId:{}", request_id)
|
||
|
||
|
||
class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
    """Offline (recorded video) stream analysis process."""

    __slots__ = ()

    def upload_video(self, base_dir, env, request_id, aiFilePath):
        """Upload the AI result video and return its play URL (or None on failure).

        Uses MinIO when ``self._storage_source`` == 1, otherwise Aliyun VOD.
        """
        # BUGFIX: an Aliyun SDK and upload thread were previously also built
        # unconditionally before this branch and immediately discarded;
        # build the pair exactly once here.
        if self._storage_source == 1:
            minioSdk = MinioSdk(base_dir, env, request_id)
            upload_video_thread_ai = Common(minioSdk.put_object, aiFilePath, "ai_online_%s.mp4" % request_id)
        else:
            aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
            upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
        # Thread.setDaemon() is deprecated since Python 3.10; assign the attribute.
        upload_video_thread_ai.daemon = True
        upload_video_thread_ai.start()
        ai_url = upload_video_thread_ai.get_result()
        return ai_url
|
||
|
||
'''
|
||
@staticmethod
|
||
def upload_video(base_dir, env, request_id, aiFilePath):
|
||
aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
|
||
upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
|
||
upload_video_thread_ai.setDaemon(True)
|
||
upload_video_thread_ai.start()
|
||
ai_url = upload_video_thread_ai.get_result()
|
||
return ai_url
|
||
'''
|
||
@staticmethod
|
||
def ai_normal_dtection(model, frame, request_id):
|
||
model_conf, code = model
|
||
retResults = MODEL_CONFIG[code][3]([model_conf, frame, request_id])[0]
|
||
if type(retResults) is np.ndarray or len(retResults) == 0:
|
||
ret = retResults
|
||
if type(retResults) is np.ndarray:
|
||
ret = retResults.tolist()
|
||
else:
|
||
ret = retResults[2]
|
||
return code, ret
|
||
|
||
    @staticmethod
    def obj_det(self, model_array, frame, task_status, cframe, tt, request_id):
        """Run every model in *model_array* on *frame* and collect detections.

        NOTE: declared @staticmethod but receives the instance explicitly as
        its first argument; callers invoke it as ``t.submit(self.obj_det,
        self, ...)``.

        ``task_status`` is a shared 2-element list. ``task_status[1] == 1``
        means recent frames had detections, so every frame is analysed; when
        it drops to 0 only every 30th frame (``cframe % 30 == 0``) is probed
        until something is found again. *tt* is the inner thread pool used
        for the per-model fan-out.

        Returns:
            list of (model_code, detections) tuples; may be empty.
        """
        push_obj = []
        if task_status[1] == 1:
            # Active mode: fan each model out to the pool for this frame.
            dtection_result = []
            for model in model_array:
                result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
                dtection_result.append(result)
            for d in dtection_result:
                code, det_r = d.result()
                if len(det_r) > 0:
                    push_obj.append((code, det_r))
            # Nothing found in this frame: drop back to sampling mode.
            if len(push_obj) == 0:
                task_status[1] = 0
        if task_status[1] == 0:
            # Sampling mode: probe only one frame out of every 30.
            if cframe % 30 == 0:
                dtection_result1 = []
                for model in model_array:
                    result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
                    dtection_result1.append(result)
                for d in dtection_result1:
                    code, det_r = d.result()
                    if len(det_r) > 0:
                        push_obj.append((code, det_r))
                # Detections reappeared: switch back to per-frame analysis.
                if len(push_obj) > 0:
                    task_status[1] = 1
        return push_obj
|
||
|
||
@staticmethod
|
||
def start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context):
|
||
pushProcess = OffPushStreamProcess(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
|
||
pushProcess.daemon = True
|
||
pushProcess.start()
|
||
return pushProcess
|
||
|
||
@staticmethod
|
||
def start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, frame_num):
|
||
pullProcess = OfflinePullVideoStreamProcess(msg, context, fb_queue, pull_queue, image_queue, analyse_type,
|
||
frame_num)
|
||
pullProcess.daemon = True
|
||
pullProcess.start()
|
||
return pullProcess
|
||
|
||
@staticmethod
|
||
def checkPT(service_timeout, start_time, pull_process, push_process, hb_thread, push_ex_queue, pull_queue,
|
||
request_id):
|
||
if time() - start_time > service_timeout:
|
||
logger.error("任务执行超时, requestId: {}", request_id)
|
||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
|
||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
|
||
if pull_process is not None and not pull_process.is_alive():
|
||
while True:
|
||
if pull_queue.empty() or pull_queue.qsize() == 0:
|
||
break
|
||
pull_result = get_no_block_queue(pull_queue)
|
||
if pull_result is not None and pull_result[0] == 1:
|
||
raise ServiceException(pull_result[1], pull_result[2])
|
||
logger.info("拉流进程异常停止, requestId: {}", request_id)
|
||
raise Exception("拉流进程异常停止!")
|
||
if hb_thread is not None and not hb_thread.is_alive():
|
||
logger.info("心跳线程异常停止, requestId: {}", request_id)
|
||
raise Exception("心跳线程异常停止!")
|
||
if push_process is not None and not push_process.is_alive():
|
||
while True:
|
||
if push_ex_queue.empty() or push_ex_queue.qsize() == 0:
|
||
break
|
||
push_result = get_no_block_queue(push_ex_queue)
|
||
if push_result is not None and push_result[0] == 1:
|
||
raise ServiceException(push_result[1], push_result[2])
|
||
logger.info("推流进程异常停止, requestId: {}", request_id)
|
||
raise Exception("推流进程异常停止!")
|
||
|
||
    def run(self):
        """Main loop of the offline analysis task.

        Same orchestration as the online variant but only the AI result video
        is produced and uploaded; there is no original-video output.
        """
        msg, context, analyse_type, ex = self._msg, self._context, self._analyse_type, None
        # Offline mode: only build the AI output path.
        self.build_video_path(context, msg, is_build_or=False)
        request_id, base_dir, env = msg["request_id"], context["base_dir"], context["env"]
        # Pull process, push process, heartbeat thread handles.
        pull_process, push_process, hb_thread = None, None, None
        service_timeout = int(context["service"]["timeout"])
        # Event / pull / heartbeat / feedback queues.
        event_queue, pull_queue, hb_queue, fb_queue = self.event_queue, self._pull_queue, self._hb_queue, self._fb_queue
        # Push / push-exception / image queues.
        push_queue, push_ex_queue, image_queue = self._push_queue, self._push_ex_queue, self._image_queue
        try:
            # Set up logging inside the child process.
            init_log(base_dir, env)
            logger.info("开始启动离线分析进程!requestId: {}", request_id)
            # The pull process (pull thread, image-upload thread) is slow to
            # initialise, so start it first.
            pull_process = self.start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, 25)
            # Heartbeat thread.
            hb_thread = self.start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
            # Load the model(s); get_model is defined elsewhere in this module.
            model_array = get_model(msg, context, analyse_type)
            # Push-stream process.
            push_process = self.start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
            # task_status[0]: models initialised? 0 = no, 1 = yes.
            # task_status[1]: detections currently present? 0 = no, 1 = yes.
            task_status = [0, 0]
            draw_config = {}
            start_time = time()
            # Two outer workers measured optimal for recognition throughput.
            with ThreadPoolExecutor(max_workers=2) as t:
                # A model combination holds at most 3 models, so the 2 outer
                # workers need up to 6 inner workers.
                with ThreadPoolExecutor(max_workers=6) as tt:
                    while True:
                        # Verify pull/push processes and heartbeat are alive.
                        self.checkPT(service_timeout, start_time, pull_process, push_process, hb_thread, push_ex_queue,
                                     pull_queue, request_id)
                        # Propagate any push-side error.
                        push_status = get_no_block_queue(push_ex_queue)
                        if push_status is not None and push_status[0] == 1:
                            raise ServiceException(push_status[1], push_status[2])
                        # Handle external commands.
                        event_result = get_no_block_queue(event_queue)
                        if event_result:
                            cmdStr = event_result.get("command")
                            # Stop command: ask the pull process to wind down.
                            if "stop" == cmdStr:
                                logger.info("离线任务开始停止, requestId: {}", request_id)
                                pull_process.sendCommand({"command": 'stop'})
                            # Algorithm on/off: relay to both workers.
                            if cmdStr in ['algStart', 'algStop']:
                                logger.info("发送向推流进程发送算法命令, requestId: {}, {}", request_id, cmdStr)
                                put_queue(push_queue, (2, cmdStr), timeout=1, is_ex=True)
                                pull_process.sendCommand({"command": cmdStr})
                        pull_result = get_no_block_queue(pull_queue)
                        if pull_result is None:
                            sleep(1)
                            continue
                        # (4, (frame_list, frame_index_list, all_frames)): a frame batch.
                        if pull_result[0] == 4:
                            frame_list, frame_index_list, all_frames = pull_result[1]
                            if len(frame_list) > 0:
                                # Lazily initialise models with the frame size.
                                if task_status[0] == 0:
                                    task_status[0] = 1
                                    for i, model in enumerate(model_array):
                                        model_conf, code = model
                                        model_param = model_conf[1]
                                        # (modeType, model_param, allowedList, names, rainbows)
                                        MODEL_CONFIG[code][2](frame_list[0].shape[1], frame_list[0].shape[0],
                                                              model_conf)
                                        if draw_config.get("font_config") is None:
                                            draw_config["font_config"] = model_param['font_config']
                                        if draw_config.get(code) is None:
                                            draw_config[code] = {}
                                            draw_config[code]["allowedList"] = model_conf[2]
                                            draw_config[code]["rainbows"] = model_conf[4]
                                            draw_config[code]["label_arrays"] = model_param['label_arraylist']
                                            if "label_dict" in model_param:
                                                draw_config[code]["label_dict"] = model_param['label_dict']
                                # Detect frames concurrently (2 workers fastest).
                                det_array = []
                                for i, frame in enumerate(frame_list):
                                    det_result = t.submit(self.obj_det, self, model_array, frame, task_status,
                                                          frame_index_list[i], tt, request_id)
                                    det_array.append(det_result)
                                push_objs = [det.result() for det in det_array]
                                put_queue(push_queue,
                                          (1, (frame_list, frame_index_list, all_frames, draw_config, push_objs)),
                                          timeout=2, is_ex=True)
                                del det_array, push_objs
                            del frame_list, frame_index_list, all_frames
                        elif pull_result[0] == 1:
                            # Pull-side error: stop the push worker, then re-raise.
                            put_queue(push_queue, (2, 'stop_ex'), timeout=1, is_ex=True)
                            logger.info("关闭推流进程, requestId:{}", request_id)
                            push_process.join(timeout=120)
                            logger.info("关闭推流进程1, requestId:{}", request_id)
                            raise ServiceException(pull_result[1], pull_result[2])
                        elif pull_result[0] == 2:
                            # Normal end of stream: orderly shutdown.
                            logger.info("离线任务开始停止, requestId: {}", request_id)
                            put_queue(push_queue, (2, 'stop'), timeout=1, is_ex=True)
                            push_process.join(120)
                            pull_process.sendCommand({"command": 'stop'})
                            pull_process.join(120)
                            break
                        else:
                            raise Exception("未知拉流状态异常!")
        except ServiceException as s:
            logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            base_dir, env, aiFilePath = context["base_dir"], context["env"], context["aiFilePath"]
            ai_url, exc = "", None
            try:
                # Stop the push process first (it consumes what pull produced).
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    push_process.join(timeout=120)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    pull_process.join(timeout=120)
                # Upload the AI video when it exists and looks non-trivial (>100 bytes).
                if exists(aiFilePath) and getsize(aiFilePath) > 100:
                    ai_url = self.upload_video(base_dir, env, request_id, aiFilePath)
                    if ai_url is None:
                        logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
                                               ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
                # Stop the heartbeat thread.
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=1)
                    hb_thread.join(timeout=120)
                # Local file is no longer needed once uploaded.
                if exists(aiFilePath):
                    logger.info("开始删除AI视频, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                    os.remove(aiFilePath)
                    logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                # On failure report FAILED with whatever URL was obtained.
                if ex:
                    code, msg = ex  # NOTE(review): rebinds the task message `msg` from above.
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         ai_video_url=ai_url), timeout=2, is_ex=False)
                else:
                    # Success requires the play URL to be present.
                    if ai_url is None or len(ai_url) == 0:
                        raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
                                               ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.SUCCESS.value,
                                                         analyse_type,
                                                         progress=success_progess,
                                                         ai_video_url=ai_url), timeout=2, is_ex=False)
            except ServiceException as s:
                logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
                exc = s.code, s.msg
            except Exception:
                logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
                exc = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
            finally:
                # Last-chance cleanup if the first shutdown attempt failed.
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    push_process.join(timeout=120)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    pull_process.join(timeout=120)
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=1)
                    hb_thread.join(timeout=120)
                if exc:
                    code, msg = exc
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         ai_video_url=ai_url), timeout=2, is_ex=False)
                self.clear_queue()
|
||
|
||
|
||
'''
|
||
图片识别
|
||
'''
|
||
|
||
|
||
class PhotosIntelligentRecognitionProcess(Process):
    """Image (photo) recognition task process."""

    __slots__ = ("_fb_queue", "_msg", "_analyse_type", "_context", "_image_queue")

    def __init__(self, *args):
        super().__init__()
        # Positional args: feedback queue, task message, analysis type, runtime context.
        self._fb_queue, self._msg, self._analyse_type, self._context = args
        self._image_queue = Queue()
        # Report WAITING immediately so the caller sees the task was accepted.
        put_queue(self._fb_queue, message_feedback(self._msg["request_id"], AnalysisStatus.WAITING.value,
                                                   self._analyse_type, progress=init_progess), timeout=2, is_ex=True)
        # Resolve the watermark logo before the child process starts.
        self.build_logo(self._msg, self._context)
        # 1 == MinIO storage, otherwise Aliyun VOD — presumably mirrors the
        # base class convention; confirm against upload callers.
        self._storage_source = self._context['service']['storage_source']
|
||
|
||
    @staticmethod
    def build_logo(msg, context):
        """Resolve the watermark logo image and store it in ``context['logo']``.

        Tries the task-supplied ``logo_url`` first and falls back to the
        bundled image/logo.png; leaves None when watermarking is disabled.
        """
        logo = None
        if context["video"]["video_add_water"]:
            logo = msg.get("logo_url")
            if logo is not None and len(logo) > 0:
                # Download failures are tolerated (enable_ex=False) -> None.
                logo = url2Array(logo, enable_ex=False)
            if logo is None:
                # Default bundled logo; flag -1 keeps the alpha channel.
                # NOTE(review): an empty-string logo_url skips both branches
                # and stores "" — confirm callers treat that as "no logo".
                logo = cv2.imread(join(context['base_dir'], "image/logo.png"), -1)
        context['logo'] = logo
|
||
|
||
    def epidemic_prevention(self, imageUrl, model, orc, request_id):
        """Run the epidemic-prevention / licence-plate model on one image URL.

        Recognised results are pushed to the feedback queue as RUNNING messages.

        Args:
            imageUrl: http(s) URL of the image to analyse.
            model: tuple (model_conf, code); model_conf unpacks to
                   (modeType, allowedList, new_device, model, par, img_type).
            orc: OcrBaiduSdk client used for plate / generic text OCR.
            request_id: analysis task id, used for logging and feedback.

        Raises:
            ServiceException: re-raised unchanged from lower layers.
            Exception: any other model failure, after logging the traceback.
        """
        try:
            # modeType, allowedList, new_device, model, par, img_type
            model_conf, code = model
            modeType, allowedList, new_device, model, par, img_type = model_conf
            # download the image and decode it into an array
            image = url2Array(imageUrl)
            param = [image, new_device, model, par, img_type, request_id]
            # dispatch to this model's inference entry (index 3 of its MODEL_CONFIG row)
            dataBack = MODEL_CONFIG[code][3](param)
            if img_type == 'plate':
                # ---- licence-plate branch ----
                carCode = ''
                if dataBack is None or dataBack.get("plateImage") is None or len(dataBack.get("plateImage")) == 0:
                    # the model produced no plate crop: OCR the full image instead
                    result = orc.license_plate_recognition(image, request_id)
                    score = ''
                    if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
                        logger.error("车牌识别为空: {}", result)
                        carCode = ''
                    else:
                        # join every recognised plate number with commas
                        for word in result.get("words_result"):
                            if word is not None and word.get("number") is not None:
                                if len(carCode) == 0:
                                    carCode = word.get("number")
                                else:
                                    carCode = carCode + "," + word.get("number")
                else:
                    # OCR the plate crop returned by the model first
                    result = orc.license_plate_recognition(dataBack.get("plateImage")[0], request_id)
                    score = dataBack.get("plateImage")[1]
                    if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
                        # crop OCR failed: retry once on the full image
                        result = orc.license_plate_recognition(image, request_id)
                        if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
                            logger.error("车牌识别为空: {}", result)
                            carCode = ''
                        else:
                            for word in result.get("words_result"):
                                if word is not None and word.get("number") is not None:
                                    if len(carCode) == 0:
                                        carCode = word.get("number")
                                    else:
                                        carCode = carCode + "," + word.get("number")
                    else:
                        for word in result.get("words_result"):
                            if word is not None and word.get("number") is not None:
                                if len(carCode) == 0:
                                    carCode = word.get("number")
                                else:
                                    carCode = carCode + "," + word.get("number")
                if len(carCode) > 0:
                    # at least one plate recognised: push a RUNNING feedback with the result
                    plate_result = {'type': str(3), 'modelCode': code, 'carUrl': imageUrl,
                                    'carCode': carCode,
                                    'score': score}
                    put_queue(self._fb_queue, message_feedback(request_id,
                                                               AnalysisStatus.RUNNING.value,
                                                               AnalysisType.IMAGE.value, "", "",
                                                               '',
                                                               imageUrl,
                                                               imageUrl,
                                                               str(code),
                                                               str(3),
                                                               plate_result), timeout=2, is_ex=True)
            if img_type == 'code':
                # ---- travel-code / health-code branch ----
                if dataBack is None or dataBack.get("type") is None:
                    return
                # travel code (type 1)
                if dataBack.get("type") == 1 and 1 in allowedList:
                    # phone-number region
                    if dataBack.get("phoneNumberImage") is None or len(dataBack.get("phoneNumberImage")) == 0:
                        phoneNumberRecognition = ''
                        phone_score = ''
                    else:
                        phone = orc.universal_text_recognition(dataBack.get("phoneNumberImage")[0], request_id)
                        phone_score = dataBack.get("phoneNumberImage")[1]
                        if phone is None or phone.get("words_result") is None or len(phone.get("words_result")) == 0:
                            logger.error("手机号识别为空: {}", phone)
                            phoneNumberRecognition = ''
                        else:
                            phoneNumberRecognition = phone.get("words_result")
                    # visited-cities region
                    if dataBack.get("cityImage") is None or len(dataBack.get("cityImage")) == 0:
                        cityRecognition = ''
                        city_score = ''
                    else:
                        city = orc.universal_text_recognition(dataBack.get("cityImage")[0], request_id)
                        city_score = dataBack.get("cityImage")[1]
                        if city is None or city.get("words_result") is None or len(city.get("words_result")) == 0:
                            logger.error("城市识别为空: {}", city)
                            cityRecognition = ''
                        else:
                            cityRecognition = city.get("words_result")
                    if len(phoneNumberRecognition) > 0 or len(cityRecognition) > 0:
                        # NOTE(review): 'phone_sorce' key looks like a typo for 'phone_score',
                        # but downstream consumers may rely on it — left unchanged.
                        trip_result = {'type': str(1),
                                       'modelCode': code,
                                       'imageUrl': imageUrl,
                                       'phoneNumberRecognition': phoneNumberRecognition,
                                       'phone_sorce': phone_score,
                                       'cityRecognition': cityRecognition,
                                       'city_score': city_score}
                        put_queue(self._fb_queue, message_feedback(request_id,
                                                                   AnalysisStatus.RUNNING.value,
                                                                   AnalysisType.IMAGE.value, "", "",
                                                                   '',
                                                                   imageUrl,
                                                                   imageUrl,
                                                                   str(code),
                                                                   str(1),
                                                                   trip_result), timeout=2, is_ex=True)
                # health code (type 2)
                if dataBack.get("type") == 2 and 2 in allowedList:
                    # name region
                    if dataBack.get("nameImage") is None or len(dataBack.get("nameImage")) == 0:
                        nameRecognition = ''
                        name_score = ''
                    else:
                        name = orc.universal_text_recognition(dataBack.get("nameImage")[0], request_id)
                        name_score = dataBack.get("nameImage")[1]
                        if name is None or name.get("words_result") is None or len(name.get("words_result")) == 0:
                            logger.error("名字识别为空: {}", name)
                            nameRecognition = ''
                        else:
                            nameRecognition = name.get("words_result")

                    # phone-number region
                    if dataBack.get("phoneNumberImage") is None or len(dataBack.get("phoneNumberImage")) == 0:
                        phoneNumberRecognition = ''
                        phone_score = ''
                    else:
                        phone = orc.universal_text_recognition(dataBack.get("phoneNumberImage")[0], request_id)
                        phone_score = dataBack.get("phoneNumberImage")[1]
                        if phone is None or phone.get("words_result") is None or len(phone.get("words_result")) == 0:
                            logger.error("手机号识别为空: {}", phone)
                            phoneNumberRecognition = ''
                        else:
                            phoneNumberRecognition = phone.get("words_result")
                    # nucleic-acid-test region
                    if dataBack.get("hsImage") is None or len(dataBack.get("hsImage")) == 0:
                        hsRecognition = ''
                        hs_score = ''
                    else:
                        hs = orc.universal_text_recognition(dataBack.get("hsImage")[0], request_id)
                        hs_score = dataBack.get("hsImage")[1]
                        if hs is None or hs.get("words_result") is None or len(hs.get("words_result")) == 0:
                            logger.error("核酸识别为空: {}", hs)
                            hsRecognition = ''
                        else:
                            hsRecognition = hs.get("words_result")
                    if len(nameRecognition) > 0 or len(phoneNumberRecognition) > 0 or len(hsRecognition) > 0:
                        healthy_result = {'type': str(2),
                                          'modelCode': code,
                                          'imageUrl': imageUrl,
                                          'color': dataBack.get("color"),
                                          'nameRecognition': nameRecognition,
                                          'name_score': name_score,
                                          'phoneNumberRecognition': phoneNumberRecognition,
                                          'phone_score': phone_score,
                                          'hsRecognition': hsRecognition,
                                          'hs_score': hs_score}
                        put_queue(self._fb_queue, message_feedback(request_id,
                                                                   AnalysisStatus.RUNNING.value,
                                                                   AnalysisType.IMAGE.value, "", "",
                                                                   '',
                                                                   imageUrl,
                                                                   imageUrl,
                                                                   str(code),
                                                                   str(2),
                                                                   healthy_result), timeout=2, is_ex=True)
        except ServiceException as s:
            raise s
        except Exception as e:
            logger.error("模型分析异常: {}, requestId: {}", format_exc(), request_id)
            raise e
|
||
|
||
'''
|
||
# 防疫模型
|
||
'''
|
||
|
||
def epidemicPrevention(self, imageUrls, model, base_dir, env, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
orc = OcrBaiduSdk(base_dir, env)
|
||
obj_list = []
|
||
for imageUrl in imageUrls:
|
||
obj = t.submit(self.epidemic_prevention, imageUrl, model, orc, request_id)
|
||
obj_list.append(obj)
|
||
for r in obj_list:
|
||
r.result(60)
|
||
|
||
def image_recognition(self, imageUrl, mod, image_queue, logo, request_id):
|
||
try:
|
||
model_conf, code = mod
|
||
model_param = model_conf[1]
|
||
image = url2Array(imageUrl)
|
||
MODEL_CONFIG[code][2](image.shape[1], image.shape[0], model_conf)
|
||
p_result = MODEL_CONFIG[code][3]([model_conf, image, request_id])[0]
|
||
#print(' line872:p_result[2]:',p_result[2] )
|
||
if p_result is None or len(p_result) < 3 or p_result[2] is None or len(p_result[2]) == 0:
|
||
return
|
||
if logo:
|
||
image = add_water_pic(image, logo, request_id)
|
||
# (modeType, model_param, allowedList, names, rainbows)
|
||
allowedList = model_conf[2]
|
||
label_arraylist = model_param['label_arraylist']
|
||
font_config = model_param['font_config']
|
||
rainbows = model_conf[4]
|
||
det_xywh = {code: {}}
|
||
ai_result_list = p_result[2]
|
||
for ai_result in ai_result_list:
|
||
box, score, cls = xywh2xyxy2(ai_result)
|
||
# 如果检测目标在识别任务中,继续处理
|
||
if cls in allowedList:
|
||
label_array = label_arraylist[cls]
|
||
color = rainbows[cls]
|
||
cd = det_xywh[code].get(cls)
|
||
if cd is None:
|
||
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
|
||
else:
|
||
det_xywh[code][cls].append([cls, box, score, label_array, color])
|
||
#print('ai_result_list:{},allowlist:{}'.format(ai_result_list,allowedList ))
|
||
if len(det_xywh) > 0:
|
||
put_queue(image_queue, (1, (det_xywh, imageUrl, image, font_config, "")), timeout=2, is_ex=False)
|
||
except ServiceException as s:
|
||
raise s
|
||
except Exception as e:
|
||
logger.error("模型分析异常: {}, requestId: {}", format_exc(), self._msg.get("request_id"))
|
||
raise e
|
||
|
||
def publicIdentification(self, imageUrls, mod, image_queue, logo, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
obj_list = []
|
||
for imageUrl in imageUrls:
|
||
obj = t.submit(self.image_recognition, imageUrl, mod, image_queue, logo, request_id)
|
||
obj_list.append(obj)
|
||
for r in obj_list:
|
||
r.result(60)
|
||
|
||
'''
|
||
1. imageUrls: 图片url数组,多张图片
|
||
2. mod: 模型对象
|
||
3. image_queue: 图片队列
|
||
'''
|
||
|
||
def baiduRecognition(self, imageUrls, mod, image_queue, logo, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
thread_result = []
|
||
for imageUrl in imageUrls:
|
||
obj = t.submit(self.baidu_recognition, imageUrl, mod, image_queue, logo, request_id)
|
||
thread_result.append(obj)
|
||
for r in thread_result:
|
||
r.result(60)
|
||
|
||
def baidu_recognition(self, imageUrl, mod, image_queue, logo, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
try:
|
||
# modeType, aipImageClassifyClient, aipBodyAnalysisClient, allowedList, rainbows,
|
||
# vehicle_names, person_names, requestId
|
||
model_conf, code = mod
|
||
allowedList = model_conf[3]
|
||
rainbows = model_conf[4]
|
||
# 图片转数组
|
||
img = url2Array(imageUrl)
|
||
vehicle_label_arrays, person_label_arrays, font_config = MODEL_CONFIG[code][2](img.shape[1],
|
||
img.shape[0],
|
||
model_conf)
|
||
obj_list = []
|
||
for target in allowedList:
|
||
parm = [target, imageUrl, model_conf[1], model_conf[2], request_id]
|
||
reuslt = t.submit(self.baidu_method, code, parm, img, image_queue, vehicle_label_arrays,
|
||
person_label_arrays, font_config, rainbows, logo)
|
||
obj_list.append(reuslt)
|
||
for r in obj_list:
|
||
r.result(60)
|
||
except ServiceException as s:
|
||
raise s
|
||
except Exception as e:
|
||
logger.error("百度AI分析异常: {}, requestId: {}", format_exc(), request_id)
|
||
raise e
|
||
|
||
@staticmethod
|
||
def baidu_method(code, parm, img, image_queue, vehicle_label_arrays, person_label_arrays, font_config,
|
||
rainbows, logo):
|
||
# [target, url, aipImageClassifyClient, aipBodyAnalysisClient, requestId]
|
||
request_id = parm[4]
|
||
target = parm[0]
|
||
image_url = parm[1]
|
||
result = MODEL_CONFIG[code][3](parm)
|
||
if target == BaiduModelTarget.VEHICLE_DETECTION.value[1] and result is not None:
|
||
vehicleInfo = result.get("vehicle_info")
|
||
if vehicleInfo is not None and len(vehicleInfo) > 0:
|
||
det_xywh = {code: {}}
|
||
copy_frame = img.copy()
|
||
for i, info in enumerate(vehicleInfo):
|
||
value = VehicleEnumVALUE.get(info.get("type"))
|
||
target_num = value.value[2]
|
||
label_array = vehicle_label_arrays[target_num]
|
||
color = rainbows[target_num]
|
||
if value is None:
|
||
logger.error("车辆识别出现未支持的目标类型!type:{}, requestId:{}", info.get("type"), request_id)
|
||
return
|
||
left_top = (int(info.get("location").get("left")), int(info.get("location").get("top")))
|
||
right_top = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")))
|
||
right_bottom = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
left_bottom = (int(info.get("location").get("left")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
box = [left_top, right_top, right_bottom, left_bottom]
|
||
score = float("%.2f" % info.get("probability"))
|
||
if logo:
|
||
copy_frame = add_water_pic(copy_frame, logo, request_id)
|
||
if det_xywh[code].get(target) is None:
|
||
det_xywh[code][target] = [[target, box, score, label_array, color]]
|
||
else:
|
||
det_xywh[code][target].append([target, box, score, label_array, color])
|
||
info["id"] = str(i)
|
||
if len(det_xywh[code]) > 0:
|
||
result["type"] = str(target)
|
||
result["modelCode"] = code
|
||
put_queue(image_queue, (1, (det_xywh, image_url, copy_frame, font_config, result)), timeout=2,
|
||
is_ex=True)
|
||
# 人体识别
|
||
if target == BaiduModelTarget.HUMAN_DETECTION.value[1] and result is not None:
|
||
personInfo = result.get("person_info")
|
||
personNum = result.get("person_num")
|
||
if personNum is not None and personNum > 0 and personInfo is not None and len(personInfo) > 0:
|
||
det_xywh = {code: {}}
|
||
copy_frame = img.copy()
|
||
for i, info in enumerate(personInfo):
|
||
left_top = (int(info.get("location").get("left")), int(info.get("location").get("top")))
|
||
right_top = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")))
|
||
right_bottom = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
left_bottom = (int(info.get("location").get("left")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
box = [left_top, right_top, right_bottom, left_bottom]
|
||
score = float("%.2f" % info.get("location").get("score"))
|
||
label_array = person_label_arrays[0]
|
||
color = rainbows[0]
|
||
if logo:
|
||
copy_frame = add_water_pic(copy_frame, logo, request_id)
|
||
if det_xywh[code].get(target) is None:
|
||
det_xywh[code][target] = [[target, box, score, label_array, color]]
|
||
else:
|
||
det_xywh[code][target].append([target, box, score, label_array, color])
|
||
info["id"] = str(i)
|
||
if len(det_xywh[code]) > 0:
|
||
result["type"] = str(target)
|
||
result["modelCode"] = code
|
||
put_queue(image_queue, (1, (det_xywh, image_url, copy_frame, font_config, result)), timeout=2)
|
||
# 人流量
|
||
if target == BaiduModelTarget.PEOPLE_COUNTING.value[1] and result is not None:
|
||
base64Image = result.get("image")
|
||
if base64Image is not None and len(base64Image) > 0:
|
||
baiduImage = base64.b64decode(base64Image)
|
||
result["type"] = str(target)
|
||
result["modelCode"] = code
|
||
del result["image"]
|
||
put_queue(image_queue, (1, (None, image_url, baiduImage, None, result)), timeout=2)
|
||
|
||
@staticmethod
|
||
def start_File_upload(fb_queue, context, msg, image_queue, analyse_type):
|
||
image_thread = ImageTypeImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
|
||
image_thread.setDaemon(True)
|
||
image_thread.start()
|
||
return image_thread
|
||
def check_ImageUrl_Vaild(self,url,timeout=1):
|
||
try:
|
||
# 发送 HTTP 请求,尝试访问图片
|
||
response = requests.get(url, timeout=timeout) # 设置超时时间为 10 秒
|
||
if response.status_code == 200:
|
||
return True,url
|
||
else:
|
||
return False,f"图片地址无效,状态码:{response.status_code}"
|
||
except requests.exceptions.RequestException as e:
|
||
# 捕获请求过程中可能出现的异常(如网络问题、超时等)
|
||
return False,str(e)
|
||
|
||
    def run(self):
        """Process entry point: validate image URLs, load models, fan recognition
        tasks out per model, then report SUCCESS/FAILED to the feedback queue."""
        fb_queue, msg, analyse_type, context = self._fb_queue, self._msg, self._analyse_type, self._context
        request_id, logo, image_queue = msg["request_id"], context['logo'], self._image_queue
        base_dir, env = context["base_dir"], context["env"]
        imageUrls = msg["image_urls"]
        image_thread = None
        init_log(base_dir, env)
        valFlag = True
        # Fail fast: if any URL is unreachable, report FAILED and abort the task.
        for url in imageUrls:
            valFlag, ret = self.check_ImageUrl_Vaild(url, timeout=1)

            if not valFlag:
                logger.error("图片分析异常: {}, requestId:{},url:{}", ret, request_id, url)
                put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                     analyse_type,
                                                     ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[0],
                                                     ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[1]), timeout=2)

                return

        with ThreadPoolExecutor(max_workers=1) as t:
            try:
                logger.info("开始启动图片识别进程, requestId: {}", request_id)
                model_array = get_model(msg, context, analyse_type)
                # background thread that uploads annotated images from image_queue
                image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
                task_list = []
                for model in model_array:
                    # Baidu cloud model
                    if model[1] == ModelType.BAIDU_MODEL.value[1]:
                        result = t.submit(self.baiduRecognition, imageUrls, model, image_queue, logo, request_id)
                        task_list.append(result)
                    # epidemic-prevention model
                    elif model[1] == ModelType.EPIDEMIC_PREVENTION_MODEL.value[1]:
                        result = t.submit(self.epidemicPrevention, imageUrls, model, base_dir, env, request_id)
                        task_list.append(result)
                    # licence-plate model (shares the epidemic-prevention pipeline)
                    elif model[1] == ModelType.PLATE_MODEL.value[1]:
                        result = t.submit(self.epidemicPrevention, imageUrls, model, base_dir, env, request_id)
                        task_list.append(result)
                    # any other local model
                    else:
                        result = t.submit(self.publicIdentification, imageUrls, model, image_queue, logo, request_id)
                        task_list.append(result)
                # wait for every model task (60s each); re-raises their exceptions
                for r in task_list:
                    r.result(60)
                if image_thread and not image_thread.is_alive():
                    raise Exception("图片识别图片上传线程异常停止!!!")
                if image_thread and image_thread.is_alive():
                    # graceful shutdown of the upload thread
                    put_queue(image_queue, (2, 'stop'), timeout=2)
                    image_thread.join(120)
                logger.info("图片进程任务完成,requestId:{}", request_id)
                put_queue(fb_queue, message_feedback(request_id,
                                                     AnalysisStatus.SUCCESS.value,
                                                     analyse_type,
                                                     progress=success_progess), timeout=2, is_ex=True)
            except ServiceException as s:
                logger.error("图片分析异常,异常编号:{}, 异常描述:{}, requestId:{}", s.code, s.msg, request_id)
                put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                     analyse_type,
                                                     s.code,
                                                     s.msg), timeout=2)
            except Exception:
                logger.error("图片分析异常: {}, requestId:{}", format_exc(), request_id)
                put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                     analyse_type,
                                                     ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                     ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), timeout=2)
            finally:
                # last-resort shutdown: drain the queue, stop the upload thread, drain again
                if image_thread and image_thread.is_alive():
                    clear_queue(image_queue)
                    put_queue(image_queue, (2, 'stop'), timeout=2)
                    image_thread.join(120)
                clear_queue(image_queue)
|
||
|
||
|
||
class ScreenRecordingProcess(Process):
    """Worker process that records a live stream to file (and optionally re-pushes it)."""

    # BUGFIX: '_analysisType' was listed twice and '_pull_queue' / '_storage_source'
    # were missing (assignments only worked via the inherited __dict__).
    __slots__ = ('_fb_queue', '_context', '_msg', '_analysisType', '_event_queue', '_hb_queue',
                 '_pull_queue', '_storage_source')

    def __init__(self, *args):
        """args: (fb_queue, context, msg, analysisType)."""
        super().__init__()
        # unpack constructor arguments
        self._fb_queue, self._context, self._msg, self._analysisType = args
        # event/heartbeat queues are unbounded; the pull queue is capped at 10 batches
        self._event_queue, self._hb_queue, self._pull_queue = Queue(), Queue(), Queue(10)
        # tell the platform the recording task has been queued
        put_queue(self._fb_queue,
                  recording_feedback(self._msg["request_id"], RecordingStatus.RECORDING_WAITING.value[0]),
                  timeout=1, is_ex=True)
        self._storage_source = self._context['service']['storage_source']
|
||
def sendEvent(self, result):
|
||
put_queue(self._event_queue, result, timeout=2, is_ex=True)
|
||
|
||
@staticmethod
|
||
def start_pull_stream_thread(msg, context, pull_queue, hb_queue, fb_queue, frame_num):
|
||
pullThread = RecordingPullStreamThread(msg, context, pull_queue, hb_queue, fb_queue, frame_num)
|
||
pullThread.setDaemon(True)
|
||
pullThread.start()
|
||
return pullThread
|
||
|
||
@staticmethod
|
||
def start_hb_thread(fb_queue, hb_queue, request_id):
|
||
hb = RecordingHeartbeat(fb_queue, hb_queue, request_id)
|
||
hb.setDaemon(True)
|
||
hb.start()
|
||
return hb
|
||
|
||
@staticmethod
|
||
def check(start_time, service_timeout, pull_thread, hb_thread, request_id):
|
||
if time() - start_time > service_timeout:
|
||
logger.error("录屏超时, requestId: {}", request_id)
|
||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
|
||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
|
||
|
||
if pull_thread and not pull_thread.is_alive():
|
||
logger.info("录屏拉流线程停止异常, requestId: {}", request_id)
|
||
raise Exception("录屏拉流线程异常停止")
|
||
if hb_thread and not hb_thread.is_alive():
|
||
logger.info("录屏心跳线程异常停止, requestId: {}", request_id)
|
||
raise Exception("录屏心跳线程异常停止")
|
||
|
||
    def run(self):
        """Process entry point: pull the stream, write frames to file (and optionally
        re-push them), upload the recording, and report the final status."""
        msg, context = self._msg, self._context
        request_id, push_url = msg['request_id'], msg.get('push_url')
        pull_queue, fb_queue, hb_queue, event_queue = self._pull_queue, self._fb_queue, self._hb_queue, \
            self._event_queue
        base_dir, env, service_timeout = context['base_dir'], context['env'], int(context["service"]["timeout"])
        # local recording path: <base_dir>/<file_path><timestamp>_on_or_<request_id>.mp4
        pre_path, end_path = '%s/%s%s' % (base_dir, context["video"]["file_path"], now_date_to_str(YMDHMSF)), \
            '%s%s' % (request_id, ".mp4")
        orFilePath = '%s%s%s' % (pre_path, "_on_or_", end_path)
        pull_thread, hb_thread = None, None
        # mutable status flags shared with the write/push helpers
        or_write_status, p_push_status = [0, 0], [0, 0]
        or_video_file, push_p = None, None
        ex = None  # (code, msg) of the first fatal error, if any
        try:
            # initialise logging for this process
            init_log(base_dir, env)
            # start the pull-stream thread (25-frame batches) and the heartbeat thread
            pull_thread = self.start_pull_stream_thread(msg, context, pull_queue, hb_queue, fb_queue, 25)
            hb_thread = self.start_hb_thread(fb_queue, hb_queue, request_id)
            start_time = time()
            with ThreadPoolExecutor(max_workers=2) as t:
                while True:
                    # raise if the task timed out or a worker thread died
                    self.check(start_time, service_timeout, pull_thread, hb_thread, request_id)
                    # was an external stop requested?
                    event_result = get_no_block_queue(event_queue)
                    if event_result is not None:
                        cmdStr = event_result.get("command")
                        # forward the stop command to the pull thread
                        if 'stop' == cmdStr:
                            logger.info("录屏任务开始停止, requestId: {}", request_id)
                            pull_thread.sendEvent({"command": "stop"})
                    pull_result = get_no_block_queue(pull_queue)
                    if pull_result is None:
                        sleep(1)
                        continue
                    if pull_result[0] == 1:
                        # pull thread reported a ServiceException: clean up and re-raise it
                        close_all_p(push_p, or_video_file, None, request_id)
                        pull_thread.sendEvent({"command": "stop"})
                        pull_thread.join(180)
                        raise ServiceException(pull_result[1], pull_result[2])
                    elif pull_result[0] == 2:
                        # normal end of stream: close writers and leave the loop
                        close_all_p(push_p, or_video_file, None, request_id)
                        pull_thread.sendEvent({"command": "stop"})
                        pull_thread.join(180)
                        break
                    elif pull_result[0] == 4:
                        # a batch of frames: (frames, their indices, total frame count)
                        frame_list, frame_index_list, all_frames = pull_result[1]
                        if len(frame_list) > 0:
                            for i, frame in enumerate(frame_list):
                                # progress heartbeat every 300 frames
                                if frame_index_list[i] % 300 == 0 and frame_index_list[i] < all_frames:
                                    task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames))
                                    put_queue(hb_queue, {"progress": task_process}, timeout=1)
                                # write locally and (optionally) push to the live URL in parallel
                                write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
                                                                 or_write_status, request_id)
                                if push_url is not None and len(push_url) > 0:
                                    push_p_result = t.submit(push_video_stream, frame, push_p, push_url, p_push_status,
                                                             request_id)
                                    push_p = push_p_result.result()
                                or_video_file = write_or_video_result.result()
                    else:
                        raise Exception("未知拉流状态异常!")
            logger.info("录屏线程任务完成,requestId:{}", self._msg.get("request_id"))
        except ServiceException as s:
            logger.error("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, self._msg.get("request_id"))
            ex = s.code, s.msg
        except Exception:
            logger.error("服务异常: {}, requestId: {},", format_exc(), self._msg.get("request_id"))
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            or_url = ""
            exn = None  # (code, msg) of any error raised during cleanup/upload
            try:
                # close the push pipe and the local video writer
                close_all_p(push_p, or_video_file, None, request_id)
                # stop the pull thread
                if pull_thread and pull_thread.is_alive():
                    pull_thread.sendEvent({"command": "stop_ex"})
                    pull_thread.sendEvent({"command": "stop"})
                    pull_thread.join(120)
                # upload the recording only when a non-trivial file exists (>100 bytes)
                if exists(orFilePath) and getsize(orFilePath) > 100:
                    or_url = self.upload_video(base_dir, env, request_id, orFilePath)
                    if or_url is None or len(or_url) == 0:
                        logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
                                               ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
                # stop the heartbeat thread
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=10, is_ex=False)
                    hb_thread.join(timeout=120)
                # delete the local file once uploaded
                if exists(orFilePath):
                    logger.info("开始删除原视频, orFilePath: {}, requestId: {}", orFilePath, request_id)
                    os.remove(orFilePath)
                    logger.info("删除原视频成功, orFilePath: {}, requestId: {}", orFilePath, request_id)
                # report failure (with whatever URL was uploaded) or success
                if ex:
                    code, msg = ex
                    put_queue(fb_queue, recording_feedback(request_id, RecordingStatus.RECORDING_FAILED.value[0],
                                                           error_code=code,
                                                           error_msg=msg,
                                                           video_url=or_url), timeout=10, is_ex=False)
                else:
                    if or_url is None or len(or_url) == 0:
                        raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
                                               ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
                    put_queue(fb_queue, recording_feedback(request_id, RecordingStatus.RECORDING_SUCCESS.value[0],
                                                           progress=success_progess,
                                                           video_url=or_url), timeout=10, is_ex=False)
            except ServiceException as s:
                exn = s.code, s.msg
            except Exception:
                logger.error("异常:{}, requestId: {}", format_exc(), request_id)
                exn = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
            finally:
                # last-resort cleanup and failure report
                if pull_thread and pull_thread.is_alive():
                    pull_thread.sendEvent({"command": "stop"})
                    pull_thread.join(120)
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=10, is_ex=False)
                    hb_thread.join(timeout=120)
                self.clear_queue_end()
                if exn:
                    code, msg = exn
                    put_queue(fb_queue, recording_feedback(request_id, RecordingStatus.RECORDING_FAILED.value[0],
                                                           error_code=code,
                                                           error_msg=msg,
                                                           video_url=or_url), timeout=10, is_ex=False)
|
||
|
||
def clear_queue_end(self):
|
||
clear_queue(self._event_queue)
|
||
clear_queue(self._hb_queue)
|
||
clear_queue(self._pull_queue)
|
||
|
||
|
||
|
||
|
||
def upload_video(self,base_dir, env, request_id, orFilePath):
|
||
if self._storage_source==1:
|
||
minioSdk = MinioSdk(base_dir, env, request_id )
|
||
upload_video_thread_ai = Common(minioSdk.put_object, aiFilePath, "%s/ai_online.mp4" % request_id)
|
||
else:
|
||
aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
|
||
upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
|
||
|
||
upload_video_thread_ai.setDaemon(True)
|
||
upload_video_thread_ai.start()
|
||
or_url = upload_video_thread_ai.get_result()
|
||
return or_url
|
||
'''
|
||
@staticmethod
|
||
def upload_video(base_dir, env, request_id, orFilePath):
|
||
aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
|
||
upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, orFilePath, "or_online_%s" % request_id)
|
||
upload_video_thread_ai.setDaemon(True)
|
||
upload_video_thread_ai.start()
|
||
or_url = upload_video_thread_ai.get_result()
|
||
return or_url
|
||
'''
|
||
|
||
"""
|
||
"models": [{
|
||
"code": "模型编号",
|
||
"categories":[{
|
||
"id": "模型id",
|
||
"config": {
|
||
"k1": "v1",
|
||
"k2": "v2"
|
||
}
|
||
}]
|
||
}]
|
||
"""
|
||
|
||
|
||
def get_model(msg, context, analyse_type):
    """Instantiate every model requested in msg["models"] for the given analysis type.

    Args:
        msg: task message containing "request_id" and the "models" list
             (each with "code", "categories", "is_video", "is_image").
        context: service configuration (base_dir, gpu_name, env, limits).
        analyse_type: one of AnalysisType ONLINE / OFFLINE / IMAGE values.

    Returns:
        list of (model_conf, code) tuples, one per successfully loaded model.

    Raises:
        ServiceException: on any configuration, resource or loading problem.
    """
    # base variables
    request_id, base_dir, gpu_name, env = msg["request_id"], context["base_dir"], context["gpu_name"], context["env"]
    models, model_num_limit = msg["models"], context["service"]["model"]['limit']
    try:
        # online / offline analysis types
        analyse_type_tuple = (AnalysisType.ONLINE.value, AnalysisType.OFFLINE.value)
        # (online/offline) validate the model combination against the configured limit
        if analyse_type in analyse_type_tuple:
            if len(models) > model_num_limit:
                raise ServiceException(ExceptionType.MODEL_GROUP_LIMIT_EXCEPTION.value[0],
                                       ExceptionType.MODEL_GROUP_LIMIT_EXCEPTION.value[1])
        modelArray, codeArray = [], set()
        for model in models:
            # model code
            code = model["code"]
            # reject duplicate codes within one request
            if code in codeArray:
                raise ServiceException(ExceptionType.MODEL_DUPLICATE_EXCEPTION.value[0],
                                       ExceptionType.MODEL_DUPLICATE_EXCEPTION.value[1])
            codeArray.add(code)
            # distinct detection-target ids for this model
            needed_objectsIndex = list(set([int(category["id"]) for category in model["categories"]]))
            logger.info("模型编号: {}, 检查目标: {}, requestId: {}", code, needed_objectsIndex, request_id)
            model_method = MODEL_CONFIG.get(code)
            if model_method is None:
                logger.error("未匹配到对应的模型, requestId:{}", request_id)
                raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
                                       ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1])
            # cpu/gpu resources are re-checked before loading each model —
            # presumably because each load consumes resources; confirm before hoisting
            check_cpu(base_dir, request_id)
            gpu_ids = check_gpu_resource(request_id)
            # online / offline analysis: the model must support video
            if analyse_type in analyse_type_tuple:
                if model["is_video"] == "1":
                    mod = model_method[0](gpu_ids[0], needed_objectsIndex, request_id, gpu_name, base_dir, env)
                    modelArray.append((mod.model_conf, code))
                else:
                    raise ServiceException(ExceptionType.MODEL_NOT_SUPPORT_VIDEO_EXCEPTION.value[0],
                                           ExceptionType.MODEL_NOT_SUPPORT_VIDEO_EXCEPTION.value[1],
                                           model_method[1].value[2])
            # image analysis: the model must support still images
            if analyse_type == AnalysisType.IMAGE.value:
                if model["is_image"] == "1":
                    mod = model_method[0](gpu_ids[0], needed_objectsIndex, request_id, gpu_name, base_dir, env)
                    modelArray.append((mod.model_conf, code))
                else:
                    raise ServiceException(ExceptionType.MODEL_NOT_SUPPORT_IMAGE_EXCEPTION.value[0],
                                           ExceptionType.MODEL_NOT_SUPPORT_IMAGE_EXCEPTION.value[1],
                                           model_method[1].value[2])
        if len(modelArray) == 0:
            raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
                                   ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1])
        return modelArray
    except ServiceException as s:
        raise s
    except Exception:
        logger.error("模型配置处理异常: {}, request_id: {}", format_exc(), request_id)
        raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
                               ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
|