# (page-scrape metadata removed: "1099 lines / 62 KiB / Python")
# -*- coding: utf-8 -*-
|
||
import base64
|
||
import os
|
||
from concurrent.futures import ThreadPoolExecutor
|
||
from os.path import join, exists, getsize
|
||
from time import time, sleep
|
||
from traceback import format_exc
|
||
|
||
import cv2
|
||
|
||
from multiprocessing import Process, Queue
|
||
from loguru import logger
|
||
|
||
from common.Constant import init_progess, success_progess
|
||
|
||
from concurrency.FileUploadThread import ImageTypeImageFileUpload
|
||
from concurrency.HeartbeatThread import Heartbeat
|
||
from concurrency.PullVideoStreamProcess2 import OnlinePullVideoStreamProcess2, OfflinePullVideoStreamProcess2
|
||
from concurrency.PushVideoStreamProcess2 import OnPushStreamProcess2, OffPushStreamProcess2
|
||
|
||
from util.GPUtils import check_gpu_resource
|
||
from util.LogUtils import init_log
|
||
from concurrency.CommonThread import Common
|
||
from enums.AnalysisStatusEnum import AnalysisStatus
|
||
from enums.AnalysisTypeEnum import AnalysisType
|
||
from enums.ExceptionEnum import ExceptionType
|
||
from enums.ModelTypeEnum2 import ModelType2
|
||
from util import TimeUtils
|
||
from util.AliyunSdk import ThAliyunVodSdk
|
||
from util.CpuUtils import check_cpu
|
||
from entity.FeedBack import message_feedback
|
||
from exception.CustomerException import ServiceException
|
||
from util.ImageUtils import url2Array, add_water_pic
|
||
from util.ModelUtils2 import MODEL_CONFIG2
|
||
from util.OcrBaiduSdk import OcrBaiduSdk
|
||
|
||
from enums.BaiduSdkEnum import VehicleEnumVALUE
|
||
from enums.ModelTypeEnum import BaiduModelTarget
|
||
from util.PlotsUtils import xywh2xyxy2
|
||
from util.QueUtil import put_queue, get_no_block_queue, clear_queue
|
||
from util.TimeUtils import now_date_to_str, YMDHMSF
|
||
|
||
|
||
class IntelligentRecognitionProcess2(Process):
    """Base process for video analysis tasks.

    Owns every inter-process queue used by the pull / push / heartbeat
    workers and reports the initial WAITING status to the feedback queue
    as soon as the process object is constructed.
    """

    __slots__ = ('_fb_queue', '_msg', '_analyse_type', '_context', 'event_queue', '_pull_queue', '_hb_queue',
                 "_image_queue", "_push_queue", '_push_ex_queue')

    def __init__(self, *args):
        super().__init__()
        # args: (feedback queue, task message, analysis type, runtime context)
        self._fb_queue, self._msg, self._analyse_type, self._context = args
        # The pull queue is bounded (10) to apply back-pressure on the pull process;
        # the other queues are unbounded.
        self.event_queue, self._pull_queue, self._hb_queue, self._image_queue = Queue(), Queue(10), Queue(), Queue()
        self._push_queue, self._push_ex_queue = Queue(), Queue()
        # Announce the task as WAITING right away; is_ex=True raises on timeout
        # so a stuck feedback queue fails construction loudly.
        put_queue(self._fb_queue, message_feedback(self._msg["request_id"],
                                                   AnalysisStatus.WAITING.value,
                                                   self._analyse_type,
                                                   progress=init_progess), timeout=2, is_ex=True)

    def sendEvent(self, eBody):
        """Post a control event (e.g. a stop command) to this process.

        Raises ServiceException when the event queue stays full for 2s.
        """
        try:
            self.event_queue.put(eBody, timeout=2)
        except Exception:
            logger.error("添加事件到队列超时异常:{}, requestId:{}", format_exc(), self._msg["request_id"])
            raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                   ExceptionType.SERVICE_INNER_EXCEPTION.value[1])

    def clear_queue(self):
        """Drain every queue owned by this process (shutdown hygiene)."""
        clear_queue(self.event_queue)
        clear_queue(self._pull_queue)
        clear_queue(self._hb_queue)
        clear_queue(self._image_queue)
        clear_queue(self._push_queue)
        clear_queue(self._push_ex_queue)

    @staticmethod
    def ai_dtection(model, frame_list, frame_index_list, request_id):
        """Run one model over a batch of frames; returns (model_code, results).

        NOTE: the historical "dtection" spelling is kept deliberately —
        subclasses submit this method to thread pools by this name.
        """
        # model is ((modeType, model_param, ...), code); MODEL_CONFIG2[code][3]
        # is the inference entry point for that model code.
        retResults = MODEL_CONFIG2[model[1]][3]([frame_list, frame_index_list, model[0][1], request_id])[0]
        # Single-frame batches come back unwrapped — normalise to a list.
        if len(frame_list) == 1:
            return model[1], [retResults[2]]
        return model[1], retResults[2]

    @staticmethod
    def build_video_path(context, msg, is_build_or=True):
        """Compute the original/AI output mp4 paths and store them on context.

        The AI path is always built; the original-video path only when
        is_build_or is True (the offline flow passes False but still reads
        context["aiFilePath"] later, so it must be set unconditionally).
        """
        random_time = now_date_to_str(YMDHMSF)
        pre_path = '%s/%s%s' % (context["base_dir"], context["video"]["file_path"], random_time)
        end_path = '%s%s' % (msg["request_id"], ".mp4")
        if is_build_or:
            context["orFilePath"] = '%s%s%s' % (pre_path, "_on_or_", end_path)
        context["aiFilePath"] = '%s%s%s' % (pre_path, "_on_ai_", end_path)

    @staticmethod
    def start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context):
        """Start the heartbeat thread as a daemon and return it."""
        hb_thread = Heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
        # .daemon = True replaces the deprecated Thread.setDaemon() call.
        hb_thread.daemon = True
        hb_thread.start()
        return hb_thread
|
||
|
||
|
||
class OnlineIntelligentRecognitionProcess2(IntelligentRecognitionProcess2):
    """Real-time (live-stream) analysis process.

    Orchestrates a pull-stream process, a push-stream process and a
    heartbeat thread, feeds frame batches through the AI models, and on
    shutdown uploads both the original and the AI-annotated video to VOD.
    """
    __slots__ = ()

    @staticmethod
    def upload_video(base_dir, env, request_id, orFilePath, aiFilePath):
        """Upload the original and AI videos to Aliyun VOD in parallel.

        Returns (or_url, ai_url); either may be None on upload failure.
        """
        aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
        upload_video_thread_or = Common(aliyunVodSdk.get_play_url, orFilePath, "or_online_%s" % request_id)
        upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
        # NOTE(review): setDaemon() is deprecated since Python 3.10; prefer .daemon = True.
        upload_video_thread_or.setDaemon(True)
        upload_video_thread_ai.setDaemon(True)
        upload_video_thread_or.start()
        upload_video_thread_ai.start()
        # Common.get_result() presumably joins the thread and returns the
        # play URL — TODO confirm against CommonThread.
        or_url = upload_video_thread_or.get_result()
        ai_url = upload_video_thread_ai.get_result()
        return or_url, ai_url

    @staticmethod
    def start_push_stream2(msg, push_queue, image_queue, push_ex_queue, hb_queue, context):
        """Start the online push-stream child process (daemonised)."""
        pushProcess = OnPushStreamProcess2(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
        pushProcess.daemon = True
        pushProcess.start()
        return pushProcess

    @staticmethod
    def start_pull_stream2(msg, context, fb_queue, pull_queue, image_queue, analyse_type, frame_num):
        """Start the online pull-stream child process (daemonised)."""
        pullProcess = OnlinePullVideoStreamProcess2(msg, context, fb_queue, pull_queue, image_queue, analyse_type,
                                                   frame_num)
        pullProcess.daemon = True
        pullProcess.start()
        return pullProcess

    @staticmethod
    def checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue, pull_queue,
                request_id):
        """Health-check the workers; raise when any has died or the task timed out.

        When a dead worker left an error tuple (code 1) in its queue, that
        error is re-raised as a ServiceException; otherwise a generic
        Exception is raised.
        """
        # Overall task deadline.
        if time() - start_time > service_timeout:
            logger.error("推流超时, requestId: {}", request_id)
            raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0],
                                   ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1])
        if pull_process is not None and not pull_process.is_alive():
            # Drain the pull queue looking for an explicit error report.
            while True:
                if pull_queue.empty() or pull_queue.qsize() == 0:
                    break
                pull_result = get_no_block_queue(pull_queue)
                if pull_result is not None and pull_result[0] == 1:
                    raise ServiceException(pull_result[1], pull_result[2])
            logger.info("拉流进程异常停止, requestId: {}", request_id)
            raise Exception("拉流进程异常停止!")
        if hb_thread is not None and not hb_thread.is_alive():
            logger.info("心跳线程异常停止, requestId: {}", request_id)
            raise Exception("心跳线程异常停止!")
        if push_process is not None and not push_process.is_alive():
            # Drain the push exception queue looking for an error report.
            while True:
                if push_ex_queue.empty() or push_ex_queue.qsize() == 0:
                    break
                push_result = get_no_block_queue(push_ex_queue)
                if push_result is not None and push_result[0] == 1:
                    raise ServiceException(push_result[1], push_result[2])
            logger.info("推流进程异常停止, requestId: {}", request_id)
            raise Exception("推流进程异常停止!")

    def run(self):
        """Main loop of the real-time analysis process.

        Pull-queue message protocol (first tuple element):
          4 -> frame batch (frame_list, frame_index_list, all_frames)
          1 -> pull error (code, message): abort
          2 -> normal end of stream: stop cleanly
        Any other code is treated as a fatal unknown state.
        """
        msg, context, analyse_type, ex = self._msg, self._context, self._analyse_type, None
        self.build_video_path(context, msg)
        request_id, base_dir, env = msg["request_id"], context["base_dir"], context["env"]
        # Pull process, push process, heartbeat thread
        pull_process, push_process, hb_thread = None, None, None
        # Event / pull / heartbeat / feedback queues
        event_queue, pull_queue, hb_queue, fb_queue = self.event_queue, self._pull_queue, self._hb_queue, self._fb_queue
        # Push queue, push-exception queue, image queue
        push_queue, push_ex_queue, image_queue = self._push_queue, self._push_ex_queue, self._image_queue
        service_timeout = int(context["service"]["timeout"])
        try:
            # Initialise logging inside the child process.
            init_log(base_dir, env)
            logger.info("开始启动实时分析进程!requestId: {}", request_id)
            # Start the pull process first (it also hosts the image-upload
            # thread) — its initialisation is the slowest.
            pull_process = self.start_pull_stream2(msg, context, fb_queue, pull_queue, image_queue, analyse_type, 100)
            # Start the heartbeat thread.
            hb_thread = self.start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
            # Load the AI models (get_model presumably defined elsewhere in this module).
            model_array = get_model(msg, context, analyse_type)
            # Start the push process.
            push_process = self.start_push_stream2(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
            # task_status[0]: model initialisation flag, 0 = not yet, 1 = done
            task_status = [0]
            draw_config = {}
            start_time = time()
            with ThreadPoolExecutor(max_workers=3) as t:
                while True:
                    # Verify pull/push/heartbeat workers are still healthy.
                    self.checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue,
                                 pull_queue, request_id)
                    # Check whether the push process reported an error.
                    push_status = get_no_block_queue(push_ex_queue)
                    if push_status is not None and push_status[0] == 1:
                        raise ServiceException(push_status[1], push_status[2])
                    # Check for an external stop command.
                    event_result = get_no_block_queue(event_queue)
                    if event_result:
                        cmdStr = event_result.get("command")
                        # Forward the stop to the pull process; the loop keeps
                        # draining until the pull process reports code 2.
                        if "stop" == cmdStr:
                            logger.info("实时任务开始停止, requestId: {}", request_id)
                            pull_process.sendCommand({"command": 'stop'})
                    pull_result = get_no_block_queue(pull_queue)
                    if pull_result is None:
                        sleep(1)
                        continue
                    # (4, (frame_list, frame_index_list, all_frames))
                    if pull_result[0] == 4:
                        frame_list, frame_index_list, all_frames = pull_result[1]
                        if len(frame_list) > 0:
                            # One-time model initialisation against the first frame's size.
                            if task_status[0] == 0:
                                task_status[0] = 1
                                for i, model in enumerate(model_array):
                                    # (modeType, model_param, allowedList, names, rainbows)
                                    model_conf, code = model
                                    model_param = model_conf[1]
                                    MODEL_CONFIG2[code][2](frame_list[0].shape[1], frame_list[0].shape[0], model_conf)
                                    if draw_config.get("font_config") is None:
                                        draw_config["font_config"] = model_param['font_config']
                                    if draw_config.get(code) is None:
                                        draw_config[code] = {}
                                        draw_config[code]["allowedList"] = model_conf[2]
                                        draw_config[code]["rainbows"] = model_conf[4]
                                        draw_config[code]["label_arrays"] = model_param['label_arraylist']
                            # Fan the batch out to all models concurrently.
                            det_array = []
                            for model in model_array:
                                result = t.submit(self.ai_dtection, model, frame_list, frame_index_list, request_id)
                                det_array.append(result)
                            push_objs = [det.result() for det in det_array]
                            # Hand the annotated batch to the push process.
                            put_queue(push_queue,
                                      (1, (frame_list, frame_index_list, all_frames, draw_config, push_objs)),
                                      timeout=10)
                            del det_array, push_objs
                        del frame_list, frame_index_list, all_frames
                    elif pull_result[0] == 1:
                        # Pull error: stop both workers, then surface the error.
                        put_queue(push_queue, (2, 'stop_ex'), timeout=1, is_ex=True)
                        push_process.join(120)
                        pull_process.sendCommand({"command": 'stop'})
                        pull_process.join(120)
                        raise ServiceException(pull_result[1], pull_result[2])
                    elif pull_result[0] == 2:
                        # Clean end of stream: stop both workers and exit the loop.
                        put_queue(push_queue, (2, 'stop'), timeout=1, is_ex=True)
                        push_process.join(120)
                        pull_process.sendCommand({"command": 'stop'})
                        pull_process.join(120)
                        break
                    else:
                        raise Exception("未知拉流状态异常!")
        except ServiceException as s:
            logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            # Shutdown / upload / report phase. `ex` carries an error from
            # the analysis phase, `exc` one from this cleanup phase.
            orFilePath, aiFilePath = context["orFilePath"], context["aiFilePath"]
            base_dir, env = context["base_dir"], context["env"]
            or_url, ai_url, exc = "", "", None
            try:
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    logger.info("关闭推流进程, requestId:{}", request_id)
                    push_process.join(timeout=120)
                    logger.info("关闭推流进程1, requestId:{}", request_id)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    logger.info("关闭拉流进程, requestId:{}", request_id)
                    pull_process.join(timeout=120)
                    logger.info("关闭拉流进程1, requestId:{}", request_id)
                # Upload only when both files exist and the original is non-trivial (>100 bytes).
                if exists(orFilePath) and exists(aiFilePath) and getsize(orFilePath) > 100:
                    or_url, ai_url = self.upload_video(base_dir, env, request_id, orFilePath, aiFilePath)
                    if or_url is None or ai_url is None:
                        logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
                                               ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
                # Stop the heartbeat thread.
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=10, is_ex=False)
                    hb_thread.join(timeout=120)
                # Local files have been uploaded (or are useless) — delete them.
                if exists(orFilePath):
                    logger.info("开始删除原视频, orFilePath: {}, requestId: {}", orFilePath, request_id)
                    os.remove(orFilePath)
                    logger.info("删除原视频成功, orFilePath: {}, requestId: {}", orFilePath, request_id)
                if exists(aiFilePath):
                    logger.info("开始删除AI视频, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                    os.remove(aiFilePath)
                    logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                # On analysis error: report FAILED, attaching whatever URLs we got.
                if ex:
                    code, msg = ex
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         video_url=or_url,
                                                         ai_video_url=ai_url), timeout=10, is_ex=False)
                else:
                    # Success requires both play URLs to be present.
                    if or_url is None or len(or_url) == 0 or ai_url is None or len(ai_url) == 0:
                        raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
                                               ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.SUCCESS.value,
                                                         analyse_type,
                                                         progress=success_progess,
                                                         video_url=or_url,
                                                         ai_video_url=ai_url), timeout=10, is_ex=False)

            except ServiceException as s:
                logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
                exc = s.code, s.msg
            except Exception:
                logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
                exc = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
            finally:
                # Last-chance cleanup: ensure workers are stopped even if the
                # cleanup phase above raised, then report the cleanup failure.
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    logger.info("关闭推流进程, requestId:{}", request_id)
                    push_process.join(timeout=120)
                    logger.info("关闭推流进程1, requestId:{}", request_id)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    logger.info("关闭拉流进程, requestId:{}", request_id)
                    pull_process.join(timeout=120)
                    logger.info("关闭拉流进程1, requestId:{}", request_id)
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=10, is_ex=False)
                    hb_thread.join(timeout=120)
                if exc:
                    code, msg = exc
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         video_url=or_url,
                                                         ai_video_url=ai_url), timeout=10, is_ex=False)
                self.clear_queue()
|
||
|
||
|
||
class OfflineIntelligentRecognitionProcess2(IntelligentRecognitionProcess2):
    """Offline (recorded-video) analysis process.

    Same orchestration as the online variant, but no original-video copy
    is produced: only the AI-annotated video is uploaded to VOD.
    """
    __slots__ = ()

    @staticmethod
    def upload_video(base_dir, env, request_id, aiFilePath):
        """Upload the AI video to Aliyun VOD; returns its play URL or None."""
        aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
        upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
        # NOTE(review): setDaemon() is deprecated since Python 3.10; prefer .daemon = True.
        upload_video_thread_ai.setDaemon(True)
        upload_video_thread_ai.start()
        ai_url = upload_video_thread_ai.get_result()
        return ai_url

    @staticmethod
    def start_push_stream2(msg, push_queue, image_queue, push_ex_queue, hb_queue, context):
        """Start the offline push-stream child process (daemonised)."""
        pushProcess = OffPushStreamProcess2(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
        pushProcess.daemon = True
        pushProcess.start()
        return pushProcess

    @staticmethod
    def start_pull_stream2(msg, context, fb_queue, pull_queue, image_queue, analyse_type, frame_num):
        """Start the offline pull-stream child process (daemonised)."""
        pullProcess = OfflinePullVideoStreamProcess2(msg, context, fb_queue, pull_queue, image_queue, analyse_type,
                                                     frame_num)
        pullProcess.daemon = True
        pullProcess.start()
        return pullProcess

    @staticmethod
    def checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue, pull_queue,
                request_id):
        """Health-check the workers; raise when any has died or the task timed out.

        Mirrors the online variant: a dead worker's queued error tuple
        (code 1) is re-raised as ServiceException, otherwise a generic
        Exception is raised.
        """
        # Overall task deadline.
        if time() - start_time > service_timeout:
            logger.error("推流超时, requestId: {}", request_id)
            raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0],
                                   ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1])
        if pull_process is not None and not pull_process.is_alive():
            # Drain the pull queue looking for an explicit error report.
            while True:
                if pull_queue.empty() or pull_queue.qsize() == 0:
                    break
                pull_result = get_no_block_queue(pull_queue)
                if pull_result is not None and pull_result[0] == 1:
                    raise ServiceException(pull_result[1], pull_result[2])
            logger.info("拉流进程异常停止, requestId: {}", request_id)
            raise Exception("拉流进程异常停止!")
        if hb_thread is not None and not hb_thread.is_alive():
            logger.info("心跳线程异常停止, requestId: {}", request_id)
            raise Exception("心跳线程异常停止!")
        if push_process is not None and not push_process.is_alive():
            # Drain the push exception queue looking for an error report.
            while True:
                if push_ex_queue.empty() or push_ex_queue.qsize() == 0:
                    break
                push_result = get_no_block_queue(push_ex_queue)
                if push_result is not None and push_result[0] == 1:
                    raise ServiceException(push_result[1], push_result[2])
            logger.info("推流进程异常停止, requestId: {}", request_id)
            raise Exception("推流进程异常停止!")

    def run(self):
        """Main loop of the offline analysis process.

        Pull-queue message protocol (first tuple element):
          4 -> frame batch (frame_list, frame_index_list, all_frames)
          1 -> pull error (code, message): abort
          2 -> end of file: stop cleanly
        """
        msg, context, analyse_type, ex = self._msg, self._context, self._analyse_type, None
        # No original-video copy in the offline flow.
        self.build_video_path(context, msg, is_build_or=False)
        request_id, base_dir, env = msg["request_id"], context["base_dir"], context["env"]
        # Pull process, push process
        pull_process, push_process = None, None
        # Heartbeat thread
        hb_thread = None
        # Event / pull / heartbeat / feedback queues
        event_queue, pull_queue, hb_queue, fb_queue = self.event_queue, self._pull_queue, self._hb_queue, self._fb_queue
        # Push queue, push-exception queue, image queue
        push_queue, push_ex_queue, image_queue = self._push_queue, self._push_ex_queue, self._image_queue
        try:
            # Initialise logging inside the child process.
            init_log(base_dir, env)
            logger.info("开始启动离线分析进程!requestId: {}", request_id)
            # Start the pull process first (it also hosts the image-upload
            # thread) — its initialisation is the slowest.
            pull_process = self.start_pull_stream2(msg, context, fb_queue, pull_queue, image_queue, analyse_type, 100)
            # Start the heartbeat thread.
            hb_thread = self.start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
            # Load the AI models.
            model_array = get_model(msg, context, analyse_type)
            # Start the push process.
            push_process = self.start_push_stream2(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
            # task_status[0]: model initialisation flag (0/1)
            # task_status[1]: detection problem flag (0 = ok, 1 = problem)
            task_status = [0, 0]
            draw_config = {}
            service_timeout = int(context["service"]["timeout"])
            start_time = time()
            with ThreadPoolExecutor(max_workers=2) as t:
                while True:
                    # Verify pull/push/heartbeat workers are still healthy.
                    self.checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue,
                                 pull_queue, request_id)
                    # Check whether the push process reported an error.
                    push_status = get_no_block_queue(push_ex_queue)
                    if push_status is not None and push_status[0] == 1:
                        raise ServiceException(push_status[1], push_status[2])
                    # Check for an external stop command.
                    event_result = get_no_block_queue(event_queue)
                    if event_result:
                        cmdStr = event_result.get("command")
                        # Forward the stop to the pull process.
                        if "stop" == cmdStr:
                            logger.info("离线任务开始停止, requestId: {}", request_id)
                            pull_process.sendCommand({"command": 'stop'})
                    pull_result = get_no_block_queue(pull_queue)
                    if pull_result is None:
                        sleep(1)
                        continue
                    # (4, (frame_list, frame_index_list, all_frames))
                    if pull_result[0] == 4:
                        frame_list, frame_index_list, all_frames = pull_result[1]
                        if len(frame_list) > 0:
                            # One-time model initialisation against the first frame's size.
                            if task_status[0] == 0:
                                task_status[0] = 1
                                for i, model in enumerate(model_array):
                                    model_conf, code = model
                                    model_param = model_conf[1]
                                    # (modeType, model_param, allowedList, names, rainbows)
                                    MODEL_CONFIG2[code][2](frame_list[0].shape[1], frame_list[0].shape[0],
                                                          model_conf)
                                    if draw_config.get("font_config") is None:
                                        draw_config["font_config"] = model_param['font_config']
                                    if draw_config.get(code) is None:
                                        draw_config[code] = {}
                                        draw_config[code]["allowedList"] = model_conf[2]
                                        draw_config[code]["rainbows"] = model_conf[4]
                                        draw_config[code]["label_arrays"] = model_param['label_arraylist']
                            # Concurrent inference; two worker threads tested as optimal.
                            det_array = []
                            for model in model_array:
                                result = t.submit(self.ai_dtection, model, frame_list, frame_index_list, request_id)
                                det_array.append(result)
                            push_objs = [det.result() for det in det_array]
                            # Hand the annotated batch to the push process.
                            put_queue(push_queue,
                                      (1, (frame_list, frame_index_list, all_frames, draw_config, push_objs)),
                                      timeout=10)
                            del det_array, push_objs
                        del frame_list, frame_index_list, all_frames
                    elif pull_result[0] == 1:
                        # Pull error: stop the push process, then surface the error.
                        put_queue(push_queue, (2, 'stop_ex'), timeout=1, is_ex=True)
                        logger.info("关闭推流进程, requestId:{}", request_id)
                        push_process.join(timeout=120)
                        logger.info("关闭推流进程1, requestId:{}", request_id)
                        raise ServiceException(pull_result[1], pull_result[2])
                    elif pull_result[0] == 2:
                        # Clean end of file: stop both workers and exit the loop.
                        logger.info("离线任务开始停止, requestId: {}", request_id)
                        put_queue(push_queue, (2, 'stop'), timeout=1, is_ex=True)
                        push_process.join(120)
                        pull_process.sendCommand({"command": 'stop'})
                        pull_process.join(120)
                        break
                    else:
                        raise Exception("未知拉流状态异常!")
        except ServiceException as s:
            logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
            ex = s.code, s.msg
        except Exception:
            logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
        finally:
            # Shutdown / upload / report phase. `ex` carries an error from the
            # analysis phase, `exc` one from this cleanup phase.
            base_dir, env, aiFilePath = context["base_dir"], context["env"], context["aiFilePath"]
            ai_url, exc = "", None
            try:
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    push_process.join(timeout=120)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    pull_process.join(timeout=120)
                # Upload only when the AI file exists and is non-trivial (>100 bytes).
                if exists(aiFilePath) and getsize(aiFilePath) > 100:
                    ai_url = self.upload_video(base_dir, env, request_id, aiFilePath)
                    if ai_url is None:
                        logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
                        raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
                                               ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
                # Stop the heartbeat thread.
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=2, is_ex=False)
                    hb_thread.join(timeout=120)
                # The AI file has been uploaded (or is useless) — delete it.
                if exists(aiFilePath):
                    logger.info("开始删除AI视频, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                    os.remove(aiFilePath)
                    logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
                # On analysis error: report FAILED, attaching the AI URL if any.
                if ex:
                    code, msg = ex
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         ai_video_url=ai_url), timeout=10, is_ex=False)
                else:
                    # Success requires the AI play URL to be present.
                    if ai_url is None or len(ai_url) == 0:
                        raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
                                               ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.SUCCESS.value,
                                                         analyse_type,
                                                         progress=success_progess,
                                                         ai_video_url=ai_url), timeout=10, is_ex=False)

            except ServiceException as s:
                logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
                exc = s.code, s.msg
            except Exception:
                logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
                exc = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
            finally:
                # Last-chance cleanup, then report any cleanup-phase failure.
                if push_process and push_process.is_alive():
                    put_queue(push_queue, (2, 'stop_ex'), timeout=1)
                    push_process.join(timeout=120)
                if pull_process and pull_process.is_alive():
                    pull_process.sendCommand({"command": 'stop_ex'})
                    pull_process.sendCommand({"command": 'stop'})
                    pull_process.join(timeout=120)
                if hb_thread and hb_thread.is_alive():
                    put_queue(hb_queue, {"command": "stop"}, timeout=10, is_ex=False)
                    hb_thread.join(timeout=120)
                if exc:
                    code, msg = exc
                    put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                         analyse_type,
                                                         error_code=code,
                                                         error_msg=msg,
                                                         ai_video_url=ai_url), timeout=10, is_ex=False)
                self.clear_queue()
|
||
|
||
|
||
# ---------------------------------------------------------------------------
# Image (photo) recognition
# ---------------------------------------------------------------------------
|
||
|
||
|
||
class PhotosIntelligentRecognitionProcess2(Process):
|
||
__slots__ = ("_fb_queue", "_msg", "_analyse_type", "_context", "_image_queue")
|
||
|
||
def __init__(self, *args):
|
||
super().__init__()
|
||
self._fb_queue, self._msg, self._analyse_type, self._context = args
|
||
self._image_queue = Queue()
|
||
put_queue(self._fb_queue, message_feedback(self._msg["request_id"],
|
||
AnalysisStatus.WAITING.value,
|
||
self._analyse_type,
|
||
progress=init_progess), timeout=1, is_ex=True)
|
||
|
||
self.build_logo(self._msg, self._context)
|
||
|
||
@staticmethod
|
||
def build_logo(msg, context):
|
||
logo = None
|
||
if context["video"]["video_add_water"]:
|
||
logo = msg.get("logo_url")
|
||
if logo is not None and len(logo) > 0:
|
||
logo = url2Array(logo, enable_ex=False)
|
||
if logo is None:
|
||
logo = cv2.imread(join(context['base_dir'], "image/logo.png"), -1)
|
||
context['logo'] = logo
|
||
|
||
    def epidemic_prevention(self, imageUrl, model, orc, request_id):
        """Run the epidemic-prevention model on one image and report findings.

        Depending on the model's img_type, this recognises either a vehicle
        licence plate ('plate') or a trip-code / health-code screenshot
        ('code'), using Baidu OCR (`orc`) to read the cropped regions, and
        pushes any result to the feedback queue as a RUNNING progress
        message.
        """
        try:
            # model is (model_conf, code); model_conf = (modeType, model_param, allowedList)
            model_conf, code = model
            modeType, model_param, allowedList = model_conf
            img_type = model_param["img_type"]
            image = url2Array(imageUrl)
            param = [model_param, image, request_id]
            # Run the detector; dataBack's keys depend on img_type
            # (plateImage / type / phoneNumberImage / cityImage / nameImage /
            # hsImage / color) — schema assumed from usage below.
            dataBack = MODEL_CONFIG2[code][3](param)
            if img_type == 'plate':
                carCode = ''
                if dataBack is None or dataBack.get("plateImage") is None or len(dataBack.get("plateImage")) == 0:
                    # No plate crop from the model — OCR the whole image.
                    result = orc.license_plate_recognition(image, request_id)
                    score = ''
                    if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
                        logger.error("车牌识别为空: {}", result)
                        carCode = ''
                    else:
                        # Join all recognised plate numbers with commas.
                        for word in result.get("words_result"):
                            if word is not None and word.get("number") is not None:
                                if len(carCode) == 0:
                                    carCode = word.get("number")
                                else:
                                    carCode = carCode + "," + word.get("number")
                else:
                    # Model produced a plate crop: OCR it first, falling back
                    # to the full image if the crop yields nothing.
                    result = orc.license_plate_recognition(dataBack.get("plateImage")[0], request_id)
                    score = dataBack.get("plateImage")[1]
                    if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
                        result = orc.license_plate_recognition(image, request_id)
                        if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
                            logger.error("车牌识别为空: {}", result)
                            carCode = ''
                        else:
                            for word in result.get("words_result"):
                                if word is not None and word.get("number") is not None:
                                    if len(carCode) == 0:
                                        carCode = word.get("number")
                                    else:
                                        carCode = carCode + "," + word.get("number")
                    else:
                        for word in result.get("words_result"):
                            if word is not None and word.get("number") is not None:
                                if len(carCode) == 0:
                                    carCode = word.get("number")
                                else:
                                    carCode = carCode + "," + word.get("number")
                # Report the plate only when something was recognised.
                if len(carCode) > 0:
                    plate_result = {'type': str(3), 'modelCode': code, 'carUrl': imageUrl,
                                    'carCode': carCode,
                                    'score': score}
                    put_queue(self._fb_queue, message_feedback(request_id,
                                                               AnalysisStatus.RUNNING.value,
                                                               AnalysisType.IMAGE.value, "", "",
                                                               '',
                                                               imageUrl,
                                                               imageUrl,
                                                               str(code),
                                                               str(3),
                                                               plate_result),
                              timeout=1)
            if img_type == 'code':
                if dataBack is None or dataBack.get("type") is None:
                    return
                # Trip code (type 1), when enabled for this task.
                if dataBack.get("type") == 1 and 1 in allowedList:
                    # Phone number region.
                    if dataBack.get("phoneNumberImage") is None or len(dataBack.get("phoneNumberImage")) == 0:
                        phoneNumberRecognition = ''
                        phone_score = ''
                    else:
                        phone = orc.universal_text_recognition(dataBack.get("phoneNumberImage")[0], request_id)
                        phone_score = dataBack.get("phoneNumberImage")[1]
                        if phone is None or phone.get("words_result") is None or len(phone.get("words_result")) == 0:
                            logger.error("手机号识别为空: {}", phone)
                            phoneNumberRecognition = ''
                        else:
                            phoneNumberRecognition = phone.get("words_result")
                    # Visited-cities region.
                    if dataBack.get("cityImage") is None or len(dataBack.get("cityImage")) == 0:
                        cityRecognition = ''
                        city_score = ''
                    else:
                        city = orc.universal_text_recognition(dataBack.get("cityImage")[0], request_id)
                        city_score = dataBack.get("cityImage")[1]
                        if city is None or city.get("words_result") is None or len(city.get("words_result")) == 0:
                            logger.error("城市识别为空: {}", city)
                            cityRecognition = ''
                        else:
                            cityRecognition = city.get("words_result")
                    # Report only when at least one field was recognised.
                    if len(phoneNumberRecognition) > 0 or len(cityRecognition) > 0:
                        trip_result = {'type': str(1),
                                       'modelCode': code,
                                       'imageUrl': imageUrl,
                                       'phoneNumberRecognition': phoneNumberRecognition,
                                       'phone_sorce': phone_score,
                                       'cityRecognition': cityRecognition,
                                       'city_score': city_score}
                        put_queue(self._fb_queue, message_feedback(request_id,
                                                                   AnalysisStatus.RUNNING.value,
                                                                   AnalysisType.IMAGE.value, "", "",
                                                                   '',
                                                                   imageUrl,
                                                                   imageUrl,
                                                                   str(code),
                                                                   str(1),
                                                                   trip_result),
                                  timeout=1)
                # Health code (type 2), when enabled for this task.
                if dataBack.get("type") == 2 and 2 in allowedList:
                    # Name region.
                    if dataBack.get("nameImage") is None or len(dataBack.get("nameImage")) == 0:
                        nameRecognition = ''
                        name_score = ''
                    else:
                        name = orc.universal_text_recognition(dataBack.get("nameImage")[0], request_id)
                        name_score = dataBack.get("nameImage")[1]
                        if name is None or name.get("words_result") is None or len(name.get("words_result")) == 0:
                            logger.error("名字识别为空: {}", name)
                            nameRecognition = ''
                        else:
                            nameRecognition = name.get("words_result")

                    # Phone number region.
                    if dataBack.get("phoneNumberImage") is None or len(dataBack.get("phoneNumberImage")) == 0:
                        phoneNumberRecognition = ''
                        phone_score = ''
                    else:
                        phone = orc.universal_text_recognition(dataBack.get("phoneNumberImage")[0], request_id)
                        phone_score = dataBack.get("phoneNumberImage")[1]
                        if phone is None or phone.get("words_result") is None or len(phone.get("words_result")) == 0:
                            logger.error("手机号识别为空: {}", phone)
                            phoneNumberRecognition = ''
                        else:
                            phoneNumberRecognition = phone.get("words_result")
                    # Nucleic-acid-test region.
                    if dataBack.get("hsImage") is None or len(dataBack.get("hsImage")) == 0:
                        hsRecognition = ''
                        hs_score = ''
                    else:
                        hs = orc.universal_text_recognition(dataBack.get("hsImage")[0], request_id)
                        hs_score = dataBack.get("hsImage")[1]
                        if hs is None or hs.get("words_result") is None or len(hs.get("words_result")) == 0:
                            logger.error("核酸识别为空: {}", hs)
                            hsRecognition = ''
                        else:
                            hsRecognition = hs.get("words_result")
                    # Report only when at least one field was recognised.
                    if len(nameRecognition) > 0 or len(phoneNumberRecognition) > 0 or len(hsRecognition) > 0:
                        healthy_result = {'type': str(2),
                                          'modelCode': code,
                                          'imageUrl': imageUrl,
                                          'color': dataBack.get("color"),
                                          'nameRecognition': nameRecognition,
                                          'name_score': name_score,
                                          'phoneNumberRecognition': phoneNumberRecognition,
                                          'phone_score': phone_score,
                                          'hsRecognition': hsRecognition,
                                          'hs_score': hs_score}
                        put_queue(self._fb_queue, message_feedback(request_id,
                                                                   AnalysisStatus.RUNNING.value,
                                                                   AnalysisType.IMAGE.value, "", "",
                                                                   '',
                                                                   imageUrl,
                                                                   imageUrl,
                                                                   str(code),
                                                                   str(2),
                                                                   healthy_result),
                                  timeout=1)
        except ServiceException as s:
            raise s
        except Exception as e:
            logger.error("模型分析异常: {}, requestId: {}", format_exc(), request_id)
            raise e
|
||
|
||
    # Epidemic-prevention model dispatch (plate / trip-code / health-code)
|
||
|
||
def epidemicPrevention(self, imageUrls, model, base_dir, env, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
orc = OcrBaiduSdk(base_dir, env)
|
||
obj_list = []
|
||
for imageUrl in imageUrls:
|
||
obj = t.submit(self.epidemic_prevention, imageUrl, model, orc, request_id)
|
||
obj_list.append(obj)
|
||
for r in obj_list:
|
||
r.result(60)
|
||
|
||
def image_recognition(self, imageUrl, mod, image_queue, logo, request_id):
|
||
try:
|
||
model_conf, code = mod
|
||
model_param = model_conf[1]
|
||
image = url2Array(imageUrl)
|
||
MODEL_CONFIG2[code][2](image.shape[1], image.shape[0], model_conf)
|
||
p_result = MODEL_CONFIG2[code][3]([[image], [0], model_param, request_id])[0]
|
||
if p_result is None or len(p_result) < 3 or p_result[2] is None or len(p_result[2]) == 0:
|
||
return
|
||
if logo:
|
||
image = add_water_pic(image, logo, request_id)
|
||
# (modeType, model_param, allowedList, names, rainbows)
|
||
allowedList = model_conf[2]
|
||
label_arraylist = model_param['label_arraylist']
|
||
font_config = model_param['font_config']
|
||
rainbows = model_conf[4]
|
||
det_xywh = {code: {}}
|
||
ai_result_list = p_result[2]
|
||
for ai_result in ai_result_list:
|
||
box, score, cls = xywh2xyxy2(ai_result)
|
||
# 如果检测目标在识别任务中,继续处理
|
||
if cls in allowedList:
|
||
label_array = label_arraylist[cls]
|
||
color = rainbows[cls]
|
||
cd = det_xywh[code].get(cls)
|
||
if cd is None:
|
||
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
|
||
else:
|
||
det_xywh[code][cls].append([cls, box, score, label_array, color])
|
||
logger.info(" line813 ai_result_list{}, det_xywh: {}, allowedList:{} ".format( ai_result_list, det_xywh,allowedList) )
|
||
if len(det_xywh) > 0:
|
||
put_queue(image_queue, (1, (det_xywh, imageUrl, image, font_config, "")))
|
||
except ServiceException as s:
|
||
raise s
|
||
except Exception as e:
|
||
logger.error("模型分析异常: {}, requestId: {}", format_exc(), self._msg.get("request_id"))
|
||
raise e
|
||
|
||
def publicIdentification(self, imageUrls, mod, image_queue, logo, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
obj_list = []
|
||
logger.info(' publicIdentification line823')
|
||
for imageUrl in imageUrls:
|
||
obj = t.submit(self.image_recognition, imageUrl, mod, image_queue, logo, request_id)
|
||
obj_list.append(obj)
|
||
for r in obj_list:
|
||
r.result(60)
|
||
|
||
'''
|
||
1. imageUrls: 图片url数组,多张图片
|
||
2. mod: 模型对象
|
||
3. image_queue: 图片队列
|
||
'''
|
||
|
||
def baiduRecognition(self, imageUrls, mod, image_queue, logo, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
thread_result = []
|
||
for imageUrl in imageUrls:
|
||
obj = t.submit(self.baidu_recognition, imageUrl, mod, image_queue, logo, request_id)
|
||
thread_result.append(obj)
|
||
for r in thread_result:
|
||
r.result()
|
||
|
||
def baidu_recognition(self, imageUrl, mod, image_queue, logo, request_id):
|
||
with ThreadPoolExecutor(max_workers=2) as t:
|
||
try:
|
||
# (modeType, model_param, allowedList, (vehicle_names, person_names), rainbows)
|
||
model_conf, code = mod
|
||
model_param = model_conf[1]
|
||
allowedList = model_conf[2]
|
||
rainbows = model_conf[4]
|
||
# 图片转数组
|
||
img = url2Array(imageUrl)
|
||
MODEL_CONFIG2[code][2](img.shape[1], img.shape[0], model_conf)
|
||
vehicle_label_arrays = model_param["vehicle_label_arrays"]
|
||
person_label_arrays = model_param["person_label_arrays"]
|
||
font_config = model_param["font_config"]
|
||
obj_list = []
|
||
for target in allowedList:
|
||
parm = [model_param, target, imageUrl, request_id]
|
||
reuslt = t.submit(self.baidu_method, code, parm, img, image_queue, vehicle_label_arrays,
|
||
person_label_arrays, font_config, rainbows, logo)
|
||
obj_list.append(reuslt)
|
||
for r in obj_list:
|
||
r.result()
|
||
except ServiceException as s:
|
||
raise s
|
||
except Exception as e:
|
||
logger.error("百度AI分析异常: {}, requestId: {}", format_exc(), request_id)
|
||
raise e
|
||
|
||
@staticmethod
|
||
def baidu_method(code, parm, img, image_queue, vehicle_label_arrays, person_label_arrays, font_config,
|
||
rainbows, logo):
|
||
# [model_param, target, imageUrl, request_id]]
|
||
request_id = parm[3]
|
||
target = parm[1]
|
||
image_url = parm[2]
|
||
result = MODEL_CONFIG2[code][3](parm)
|
||
if target == BaiduModelTarget.VEHICLE_DETECTION.value[1] and result is not None:
|
||
vehicleInfo = result.get("vehicle_info")
|
||
if vehicleInfo is not None and len(vehicleInfo) > 0:
|
||
det_xywh = {code: {}}
|
||
copy_frame = img.copy()
|
||
for i, info in enumerate(vehicleInfo):
|
||
value = VehicleEnumVALUE.get(info.get("type"))
|
||
target_num = value.value[2]
|
||
label_array = vehicle_label_arrays[target_num]
|
||
color = rainbows[target_num]
|
||
if value is None:
|
||
logger.error("车辆识别出现未支持的目标类型!type:{}, requestId:{}", info.get("type"), request_id)
|
||
return
|
||
left_top = (int(info.get("location").get("left")), int(info.get("location").get("top")))
|
||
right_top = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")))
|
||
right_bottom = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
left_bottom = (int(info.get("location").get("left")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
box = [left_top, right_top, right_bottom, left_bottom]
|
||
score = float("%.2f" % info.get("probability"))
|
||
if logo:
|
||
copy_frame = add_water_pic(copy_frame, logo, request_id)
|
||
if det_xywh[code].get(target) is None:
|
||
det_xywh[code][target] = [[target, box, score, label_array, color]]
|
||
else:
|
||
det_xywh[code][target].append([target, box, score, label_array, color])
|
||
info["id"] = str(i)
|
||
if len(det_xywh[code]) > 0:
|
||
result["type"] = str(target)
|
||
result["modelCode"] = code
|
||
put_queue(image_queue, (1, (det_xywh, image_url, copy_frame, font_config, result)))
|
||
# 人体识别
|
||
if target == BaiduModelTarget.HUMAN_DETECTION.value[1] and result is not None:
|
||
personInfo = result.get("person_info")
|
||
personNum = result.get("person_num")
|
||
if personNum is not None and personNum > 0 and personInfo is not None and len(personInfo) > 0:
|
||
det_xywh = {code: {}}
|
||
copy_frame = img.copy()
|
||
for i, info in enumerate(personInfo):
|
||
left_top = (int(info.get("location").get("left")), int(info.get("location").get("top")))
|
||
right_top = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")))
|
||
right_bottom = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
left_bottom = (int(info.get("location").get("left")),
|
||
int(info.get("location").get("top")) + int(info.get("location").get("height")))
|
||
box = [left_top, right_top, right_bottom, left_bottom]
|
||
score = float("%.2f" % info.get("location").get("score"))
|
||
label_array = person_label_arrays[0]
|
||
color = rainbows[0]
|
||
if logo:
|
||
copy_frame = add_water_pic(copy_frame, logo, request_id)
|
||
if det_xywh[code].get(target) is None:
|
||
det_xywh[code][target] = [[target, box, score, label_array, color]]
|
||
else:
|
||
det_xywh[code][target].append([target, box, score, label_array, color])
|
||
info["id"] = str(i)
|
||
if len(det_xywh[code]) > 0:
|
||
result["type"] = str(target)
|
||
result["modelCode"] = code
|
||
put_queue(image_queue, (1, (det_xywh, image_url, copy_frame, font_config, result)))
|
||
# 人流量
|
||
if target == BaiduModelTarget.PEOPLE_COUNTING.value[1] and result is not None:
|
||
base64Image = result.get("image")
|
||
if base64Image is not None and len(base64Image) > 0:
|
||
baiduImage = base64.b64decode(base64Image)
|
||
result["type"] = str(target)
|
||
result["modelCode"] = code
|
||
del result["image"]
|
||
put_queue(image_queue, (1, (None, image_url, baiduImage, None, result)))
|
||
|
||
@staticmethod
|
||
def start_File_upload(*args):
|
||
fb_queue, context, msg, image_queue, analyse_type = args
|
||
image_thread = ImageTypeImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
|
||
image_thread.setDaemon(True)
|
||
image_thread.start()
|
||
return image_thread
|
||
|
||
    def run(self):
        """Main loop of the image-analysis process.

        Loads the requested models, starts the image-upload thread, submits
        one recognition task per model, then reports SUCCESS or FAILED on the
        feedback queue. The upload thread is always stopped and the image
        queue drained in the finally block.
        """
        fb_queue, msg, analyse_type, context = self._fb_queue, self._msg, self._analyse_type, self._context
        request_id, logo, image_queue = msg["request_id"], context['logo'], self._image_queue
        base_dir, env = context["base_dir"], context["env"]
        imageUrls = msg["image_urls"]
        image_thread = None
        with ThreadPoolExecutor(max_workers=3) as t:
            try:
                init_log(base_dir, env)
                logger.info("开始启动图片识别进程, requestId: {}", request_id)
                model_array = get_model(msg, context, analyse_type)
                image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
                task_list = []
                for model in model_array:
                    # Baidu model branch
                    if model[1] == ModelType2.BAIDU_MODEL.value[1]:
                        result = t.submit(self.baiduRecognition, imageUrls, model, image_queue, logo, request_id)
                        task_list.append(result)
                    # Epidemic-prevention model
                    elif model[1] == ModelType2.EPIDEMIC_PREVENTION_MODEL.value[1]:
                        result = t.submit(self.epidemicPrevention, imageUrls, model, base_dir, env, request_id)
                        task_list.append(result)
                    # License-plate model (routed through the same OCR handler)
                    elif model[1] == ModelType2.PLATE_MODEL.value[1]:
                        result = t.submit(self.epidemicPrevention, imageUrls, model, base_dir, env, request_id)
                        task_list.append(result)
                    else:
                        result = t.submit(self.publicIdentification, imageUrls, model, image_queue, logo, request_id)
                        task_list.append(result)
                # Waiting on each future re-raises any worker exception here.
                for r in task_list:
                    r.result(timeout=60)
                logger.info(' line993: result:-------- ' )
                # An upload thread that died early means images were lost.
                if image_thread and not image_thread.is_alive():
                    raise Exception("图片识别图片上传线程异常停止!!!")
                # Graceful shutdown: send the stop sentinel, then wait.
                if image_thread and image_thread.is_alive():
                    put_queue(image_queue, (2, 'stop'), timeout=10, is_ex=True)
                    image_thread.join(120)
                logger.info("图片进程任务完成,requestId:{}", request_id)
                put_queue(fb_queue, message_feedback(request_id,
                                                     AnalysisStatus.SUCCESS.value,
                                                     analyse_type,
                                                     progress=success_progess), timeout=10, is_ex=True)
            except ServiceException as s:
                logger.error("图片分析异常,异常编号:{}, 异常描述:{}, requestId:{}", s.code, s.msg, request_id)
                put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                     analyse_type,
                                                     s.code,
                                                     s.msg))
            except Exception:
                logger.error("图片分析异常: {}, requestId:{}", format_exc(), request_id)
                put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
                                                     analyse_type,
                                                     ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                     ExceptionType.SERVICE_INNER_EXCEPTION.value[1]))
            finally:
                # Always stop the upload thread and drain the queue, even on failure.
                if image_thread and image_thread.is_alive():
                    put_queue(image_queue, (2, 'stop'), timeout=10, is_ex=True)
                    image_thread.join(120)
                clear_queue(image_queue)
|
||
|
||
|
||
"""
|
||
"models": [{
|
||
"code": "模型编号",
|
||
"categories":[{
|
||
"id": "模型id",
|
||
"config": {
|
||
"k1": "v1",
|
||
"k2": "v2"
|
||
}
|
||
}]
|
||
}]
|
||
"""
|
||
|
||
|
||
def get_model(msg, context, analyse_type):
    """Resolve the request's model list into loaded model configurations.

    msg["models"] carries [{code, categories:[{id, config}]}, ...]; each code
    is looked up in MODEL_CONFIG2 and instantiated for either video
    (online/offline) or image analysis on the first available GPU.

    Returns: list of (model_conf, code) tuples, never empty.

    Raises ServiceException for: too many models, duplicate codes, unknown
    codes, models that do not support the requested media type, an empty
    result, or any unexpected error (wrapped as MODEL_LOADING_EXCEPTION).
    """
    request_id, base_dir, gpu_name, env = msg["request_id"], context["base_dir"], context["gpu_name"], context["env"]
    models, model_num_limit = msg["models"], context["service"]["model"]['limit']
    try:
        # Online/offline (video) analysis types.
        analyse_type_tuple = (AnalysisType.ONLINE.value, AnalysisType.OFFLINE.value)
        # Video analysis only supports a limited number of combined models.
        if analyse_type in analyse_type_tuple:
            if len(models) > model_num_limit:
                raise ServiceException(ExceptionType.MODEL_GROUP_LIMIT_EXCEPTION.value[0],
                                       ExceptionType.MODEL_GROUP_LIMIT_EXCEPTION.value[1])
        # IMPROVEMENT: the CPU/GPU checks do not depend on the model, so run
        # them once per request instead of once per model inside the loop.
        check_cpu(base_dir, request_id)
        gpu_ids = check_gpu_resource(request_id)
        model_array, seen_codes = [], set()
        for model in models:
            # Model code
            code = model["code"]
            # Reject duplicate model codes in a single request.
            if code in seen_codes:
                raise ServiceException(ExceptionType.MODEL_DUPLICATE_EXCEPTION.value[0],
                                       ExceptionType.MODEL_DUPLICATE_EXCEPTION.value[1])
            seen_codes.add(code)
            # De-duplicated list of target class ids to detect.
            needed_objectsIndex = list({int(category["id"]) for category in model["categories"]})
            logger.info("模型编号: {}, 检查目标: {}, requestId: {}", code, needed_objectsIndex, request_id)
            model_method = MODEL_CONFIG2.get(code)
            if model_method is None:
                logger.error("未匹配到对应的模型, requestId:{}", request_id)
                raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
                                       ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1])
            # Real-time / offline video analysis.
            if analyse_type in analyse_type_tuple:
                if model["is_video"] == "1":
                    mod = model_method[0](gpu_ids[0], needed_objectsIndex, request_id, gpu_name, base_dir, env)
                    model_array.append((mod.model_conf, code))
                else:
                    raise ServiceException(ExceptionType.MODEL_NOT_SUPPORT_VIDEO_EXCEPTION.value[0],
                                           ExceptionType.MODEL_NOT_SUPPORT_VIDEO_EXCEPTION.value[1],
                                           model_method[1].value[2])
            # Image analysis.
            if analyse_type == AnalysisType.IMAGE.value:
                if model["is_image"] == "1":
                    mod = model_method[0](gpu_ids[0], needed_objectsIndex, request_id, gpu_name, base_dir, env)
                    model_array.append((mod.model_conf, code))
                else:
                    raise ServiceException(ExceptionType.MODEL_NOT_SUPPORT_IMAGE_EXCEPTION.value[0],
                                           ExceptionType.MODEL_NOT_SUPPORT_IMAGE_EXCEPTION.value[1],
                                           model_method[1].value[2])
        if len(model_array) == 0:
            raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
                                   ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1])
        return model_array
    except ServiceException as s:
        raise s
    except Exception:
        logger.error("模型配置处理异常: {}, request_id: {}", format_exc(), request_id)
        raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
                               ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
|