  1. # -*- coding: utf-8 -*-
  2. import base64
  3. import os
  4. from concurrent.futures import ThreadPoolExecutor
  5. from os.path import join, exists, getsize
  6. from time import time, sleep
  7. from traceback import format_exc
  8. import cv2
  9. from multiprocessing import Process, Queue
  10. from loguru import logger
  11. from common.Constant import init_progess, success_progess
  12. from concurrency.FileUploadThread import ImageTypeImageFileUpload
  13. from concurrency.HeartbeatThread import Heartbeat
  14. from concurrency.PullVideoStreamProcess import OnlinePullVideoStreamProcess, OfflinePullVideoStreamProcess
  15. from concurrency.PushVideoStreamProcess import OnPushStreamProcess, OffPushStreamProcess
  16. from util.GPUtils import check_gpu_resource
  17. from util.LogUtils import init_log
  18. from concurrency.CommonThread import Common
  19. from concurrency.PullStreamThread import RecordingPullStreamThread
  20. from concurrency.RecordingHeartbeatThread import RecordingHeartbeat
  21. from enums.AnalysisStatusEnum import AnalysisStatus
  22. from enums.AnalysisTypeEnum import AnalysisType
  23. from enums.ExceptionEnum import ExceptionType
  24. from enums.ModelTypeEnum import ModelType
  25. from enums.RecordingStatusEnum import RecordingStatus
  26. from util.AliyunSdk import ThAliyunVodSdk
  27. from util.CpuUtils import check_cpu
  28. from util.Cv2Utils import write_or_video, push_video_stream, close_all_p
  29. from entity.FeedBack import message_feedback, recording_feedback
  30. from exception.CustomerException import ServiceException
  31. from util.ImageUtils import url2Array, add_water_pic
  32. from util.ModelUtils import MODEL_CONFIG
  33. from util.OcrBaiduSdk import OcrBaiduSdk
  34. from enums.BaiduSdkEnum import VehicleEnumVALUE
  35. from enums.ModelTypeEnum import BaiduModelTarget
  36. from util.PlotsUtils import xywh2xyxy2
  37. from util.QueUtil import put_queue, get_no_block_queue, clear_queue
  38. from util.TimeUtils import now_date_to_str, YMDHMSF
  39. class IntelligentRecognitionProcess(Process):
  40. __slots__ = ('_fb_queue', '_msg', '_analyse_type', '_context', 'event_queue', '_pull_queue', '_hb_queue',
  41. "_image_queue", "_push_queue", '_push_ex_queue')
  42. def __init__(self, *args):
  43. super().__init__()
  44. # 入参
  45. self._fb_queue, self._msg, self._analyse_type, self._context = args
  46. # 初始化参数
  47. self.event_queue, self._pull_queue, self._hb_queue, self._image_queue, self._push_queue, self._push_ex_queue = \
  48. Queue(), Queue(10), Queue(), Queue(), Queue(), Queue()
  49. # 发送waitting消息
  50. put_queue(self._fb_queue, message_feedback(self._msg["request_id"], AnalysisStatus.WAITING.value,
  51. self._analyse_type, progress=init_progess), timeout=2, is_ex=True)
  52. def sendEvent(self, eBody):
  53. put_queue(self.event_queue, eBody, timeout=2, is_ex=True)
  54. def clear_queue(self):
  55. clear_queue(self.event_queue)
  56. clear_queue(self._pull_queue)
  57. clear_queue(self._hb_queue)
  58. clear_queue(self._image_queue)
  59. clear_queue(self._push_queue)
  60. clear_queue(self._push_ex_queue)
  61. @staticmethod
  62. def build_video_path(context, msg, is_build_or=True):
  63. random_time = now_date_to_str(YMDHMSF)
  64. pre_path = '%s/%s%s' % (context["base_dir"], context["video"]["file_path"], random_time)
  65. end_path = '%s%s' % (msg["request_id"], ".mp4")
66. if is_build_or:
67.     context["orFilePath"] = '%s%s%s' % (pre_path, "_on_or_", end_path)
68. context["aiFilePath"] = '%s%s%s' % (pre_path, "_on_ai_", end_path)
  69. @staticmethod
  70. def start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context):
  71. hb_thread = Heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
  72. hb_thread.setDaemon(True)
  73. hb_thread.start()
  74. return hb_thread
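# Real-time (online) analysis: starts a pull process, a heartbeat thread and a push process,
# feeds batches of pulled frames through the configured models, pushes the annotated stream back
# out, and on completion uploads both the original and the AI video to VOD before reporting the
# final status on the feedback queue.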
  75. class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
  76. __slots__ = ()
  77. @staticmethod
  78. def start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context):
  79. pushProcess = OnPushStreamProcess(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
  80. pushProcess.daemon = True
  81. pushProcess.start()
  82. return pushProcess
  83. @staticmethod
  84. def start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, frame_num):
  85. pullProcess = OnlinePullVideoStreamProcess(msg, context, fb_queue, pull_queue, image_queue, analyse_type,
  86. frame_num)
  87. pullProcess.daemon = True
  88. pullProcess.start()
  89. return pullProcess
  90. @staticmethod
  91. def upload_video(base_dir, env, request_id, orFilePath, aiFilePath):
  92. aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
  93. upload_video_thread_or = Common(aliyunVodSdk.get_play_url, orFilePath, "or_online_%s" % request_id)
  94. upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
  95. upload_video_thread_or.setDaemon(True)
  96. upload_video_thread_ai.setDaemon(True)
  97. upload_video_thread_or.start()
  98. upload_video_thread_ai.start()
  99. or_url = upload_video_thread_or.get_result()
  100. ai_url = upload_video_thread_ai.get_result()
  101. return or_url, ai_url
  102. @staticmethod
  103. def ai_normal_dtection(model, frame, request_id):
  104. model_conf, code = model
  105. retResults = MODEL_CONFIG[code][3]([model_conf, frame, request_id])[0]
  106. return code, retResults[2]
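# Detection sampling strategy used by obj_det below: while task_status[1] == 1 (a target was
# detected recently) every frame is run through all models; once nothing is detected the flag
# drops back to 0 and only every 30th frame (cframe % 30 == 0) is analysed until a target shows
# up again, which switches per-frame detection back on. This looks intended to save GPU time on
# streams where nothing is happening.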
  107. @staticmethod
  108. def obj_det(self, model_array, frame, task_status, cframe, tt, request_id):
  109. push_obj = []
  110. if task_status[1] == 1:
  111. dtection_result = []
  112. for model in model_array:
  113. result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
  114. dtection_result.append(result)
  115. for d in dtection_result:
  116. code, det_r = d.result()
  117. if len(det_r) > 0:
  118. push_obj.append((code, det_r))
  119. if len(push_obj) == 0:
  120. task_status[1] = 0
  121. if task_status[1] == 0:
  122. if cframe % 30 == 0:
  123. dtection_result1 = []
  124. for model in model_array:
  125. result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
  126. dtection_result1.append(result)
  127. for d in dtection_result1:
  128. code, det_r = d.result()
  129. if len(det_r) > 0:
  130. push_obj.append((code, det_r))
  131. if len(push_obj) > 0:
  132. task_status[1] = 1
  133. return push_obj
  134. @staticmethod
  135. def checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue, pull_queue,
  136. request_id):
  137. if time() - start_time > service_timeout:
  138. logger.error("任务执行超时, requestId: {}", request_id)
  139. raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
  140. ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
  141. if pull_process is not None and not pull_process.is_alive():
  142. while True:
  143. if pull_queue.empty() or pull_queue.qsize() == 0:
  144. break
  145. pull_result = get_no_block_queue(pull_queue)
  146. if pull_result is not None and pull_result[0] == 1:
  147. raise ServiceException(pull_result[1], pull_result[2])
  148. logger.info("拉流进程异常停止, requestId: {}", request_id)
  149. raise Exception("拉流进程异常停止!")
  150. if hb_thread is not None and not hb_thread.is_alive():
  151. logger.info("心跳线程异常停止, requestId: {}", request_id)
  152. raise Exception("心跳线程异常停止!")
  153. if push_process is not None and not push_process.is_alive():
  154. while True:
  155. if push_ex_queue.empty() or push_ex_queue.qsize() == 0:
  156. break
  157. push_result = get_no_block_queue(push_ex_queue)
  158. if push_result is not None and push_result[0] == 1:
  159. raise ServiceException(push_result[1], push_result[2])
  160. logger.info("推流进程异常停止, requestId: {}", request_id)
  161. raise Exception("推流进程异常停止!")
  162. def run(self):
  163. msg, context, analyse_type = self._msg, self._context, self._analyse_type
  164. self.build_video_path(context, msg)
  165. request_id = msg["request_id"]
  166. base_dir, env = context["base_dir"], context["env"]
  167. service_timeout = int(context["service"]["timeout"])
  168. ex = None
  169. # 拉流进程、推流进程、心跳线程
  170. pull_process, push_process, hb_thread = None, None, None
  171. # 事件队列、拉流队列、心跳队列、反馈队列
  172. event_queue, pull_queue, hb_queue, fb_queue = self.event_queue, self._pull_queue, self._hb_queue, self._fb_queue
  173. # 推流队列、推流异常队列、图片队列
  174. push_queue, push_ex_queue, image_queue = self._push_queue, self._push_ex_queue, self._image_queue
  175. try:
  176. # 初始化日志
  177. init_log(base_dir, env)
  178. # 打印启动日志
  179. logger.info("开始启动实时分析进程!requestId: {}", request_id)
  180. # 启动拉流进程(包含拉流线程, 图片上传线程)
  181. # 拉流进程初始化时间长, 先启动
  182. pull_process = self.start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, 25)
  183. # 启动心跳线程
  184. hb_thread = self.start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
  185. # 加载算法模型
  186. model_array = get_model(msg, context, analyse_type)
  187. # 启动推流进程
  188. push_process = self.start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
  189. # 第一个参数: 模型是否初始化 0:未初始化 1:初始化
  190. # 第二个参数: 检测是否有问题 0: 没有问题, 1: 有问题
  191. task_status = [0, 0]
  192. draw_config = {}
  193. start_time = time()
  194. # 识别2个线程性能最优
  195. with ThreadPoolExecutor(max_workers=2) as t:
  196. # 可能使用模型组合, 模型组合最多3个模型, 1对3, 上面的2个线程对应6个线程
  197. with ThreadPoolExecutor(max_workers=6) as tt:
  198. while True:
  199. # 检查拉流进程是否正常, 心跳线程是否正常
  200. self.checkPT(start_time, service_timeout, pull_process, push_process, hb_thread, push_ex_queue,
  201. pull_queue, request_id)
  202. # 检查推流是否异常
  203. push_status = get_no_block_queue(push_ex_queue)
  204. if push_status is not None and push_status[0] == 1:
  205. raise ServiceException(push_status[1], push_status[2])
  206. # 获取停止指令
  207. event_result = get_no_block_queue(event_queue)
  208. if event_result:
  209. cmdStr = event_result.get("command")
  210. # 接收到停止指令
  211. if "stop" == cmdStr:
  212. logger.info("实时任务开始停止, requestId: {}", request_id)
  213. pull_process.sendCommand({"command": 'stop'})
  214. pull_result = get_no_block_queue(pull_queue)
  215. if pull_result is None:
  216. sleep(1)
  217. continue
  218. # (4, (frame_list, frame_index_list, all_frames))
  219. if pull_result[0] == 4:
  220. frame_list, frame_index_list, all_frames = pull_result[1]
  221. if len(frame_list) > 0:
  222. # 判断是否已经初始化
  223. if task_status[0] == 0:
  224. task_status[0] = 1
  225. for i, model in enumerate(model_array):
  226. model_conf, code = model
  227. model_param = model_conf[1]
  228. # (modeType, model_param, allowedList, names, rainbows)
  229. MODEL_CONFIG[code][2](frame_list[0].shape[1], frame_list[0].shape[0],
  230. model_conf)
  231. if draw_config.get("font_config") is None:
  232. draw_config["font_config"] = model_param['font_config']
  233. if draw_config.get(code) is None:
  234. draw_config[code] = {}
  235. draw_config[code]["allowedList"] = model_conf[2]
  236. draw_config[code]["rainbows"] = model_conf[4]
  237. draw_config[code]["label_arrays"] = model_param['label_arraylist']
  238. # 多线程并发处理, 经过测试两个线程最优
  239. det_array = []
  240. for i, frame in enumerate(frame_list):
  241. det_result = t.submit(self.obj_det, self, model_array, frame, task_status,
  242. frame_index_list[i], tt, request_id)
  243. det_array.append(det_result)
  244. push_objs = [det.result() for det in det_array]
  245. put_queue(push_queue,
  246. (1, (frame_list, frame_index_list, all_frames, draw_config, push_objs)),
  247. timeout=2, is_ex=True)
  248. del det_array, push_objs
  249. del frame_list, frame_index_list, all_frames
  250. elif pull_result[0] == 1:
  251. # 拉流发生异常
  252. put_queue(push_queue, (2, 'stop_ex'), timeout=1, is_ex=True)
  253. push_process.join(120)
  254. pull_process.sendCommand({"command": 'stop'})
  255. pull_process.join(120)
  256. raise ServiceException(pull_result[1], pull_result[2])
  257. elif pull_result[0] == 2:
  258. put_queue(push_queue, (2, 'stop'), timeout=1, is_ex=True)
  259. push_process.join(120)
  260. pull_process.sendCommand({"command": 'stop'})
  261. pull_process.join(120)
  262. break
  263. else:
  264. raise Exception("未知拉流状态异常!")
  265. except ServiceException as s:
  266. logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
  267. ex = s.code, s.msg
  268. except Exception:
  269. logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
  270. ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
  271. finally:
  272. orFilePath, aiFilePath = context["orFilePath"], context["aiFilePath"]
  273. base_dir, env = context["base_dir"], context["env"]
  274. or_url, ai_url, exc = "", "", None
  275. try:
  276. # 如果拉流进程存在, 关闭拉流进程(拉流线程、图片上传线程)
  277. if push_process and push_process.is_alive():
  278. put_queue(push_queue, (2, 'stop_ex'), timeout=1)
  279. logger.info("关闭推流进程, requestId:{}", request_id)
  280. push_process.join(timeout=120)
  281. logger.info("关闭推流进程1, requestId:{}", request_id)
  282. if pull_process and pull_process.is_alive():
  283. pull_process.sendCommand({"command": 'stop_ex'})
  284. pull_process.sendCommand({"command": 'stop'})
  285. logger.info("关闭拉流进程, requestId:{}", request_id)
  286. pull_process.join(timeout=120)
  287. logger.info("关闭拉流进程1, requestId:{}", request_id)
  288. if exists(orFilePath) and exists(aiFilePath) and getsize(orFilePath) > 100:
  289. or_url, ai_url = self.upload_video(base_dir, env, request_id, orFilePath, aiFilePath)
  290. if or_url is None or ai_url is None:
  291. logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
  292. raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
  293. ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
  294. # 停止心跳线程
  295. if hb_thread and hb_thread.is_alive():
  296. put_queue(hb_queue, {"command": "stop"}, timeout=1)
  297. hb_thread.join(timeout=120)
  298. if exists(orFilePath):
  299. logger.info("开始删除原视频, orFilePath: {}, requestId: {}", orFilePath, request_id)
  300. os.remove(orFilePath)
  301. logger.info("删除原视频成功, orFilePath: {}, requestId: {}", orFilePath, request_id)
  302. if exists(aiFilePath):
  303. logger.info("开始删除AI视频, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
  304. os.remove(aiFilePath)
  305. logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
  306. # 如果有异常, 检查是否有原视频和AI视频,有则上传,响应失败
  307. if ex:
  308. code, msg = ex
  309. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
  310. analyse_type,
  311. error_code=code,
  312. error_msg=msg,
  313. video_url=or_url,
  314. ai_video_url=ai_url), timeout=2, is_ex=False)
  315. else:
  316. if or_url is None or len(or_url) == 0 or ai_url is None or len(ai_url) == 0:
  317. raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
  318. ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
  319. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.SUCCESS.value,
  320. analyse_type,
  321. progress=success_progess,
  322. video_url=or_url,
  323. ai_video_url=ai_url), timeout=2, is_ex=False)
  324. except ServiceException as s:
  325. logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
  326. exc = s.code, s.msg
  327. except Exception:
  328. logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
  329. exc = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
  330. finally:
  331. if push_process and push_process.is_alive():
  332. put_queue(push_queue, (2, 'stop_ex'), timeout=1)
  333. logger.info("关闭推流进程, requestId:{}", request_id)
  334. push_process.join(timeout=120)
  335. logger.info("关闭推流进程1, requestId:{}", request_id)
  336. if pull_process and pull_process.is_alive():
  337. pull_process.sendCommand({"command": 'stop_ex'})
  338. pull_process.sendCommand({"command": 'stop'})
  339. logger.info("关闭拉流进程, requestId:{}", request_id)
  340. pull_process.join(timeout=120)
  341. logger.info("关闭拉流进程1, requestId:{}", request_id)
  342. if exc:
  343. code, msg = exc
  344. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
  345. analyse_type,
  346. error_code=code,
  347. error_msg=msg,
  348. video_url=or_url,
  349. ai_video_url=ai_url), timeout=2, is_ex=False)
  350. logger.info("清理队列, requestId:{}", request_id)
  351. self.clear_queue()
  352. logger.info("清理队列完成, requestId:{}", request_id)
  353. class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
  354. __slots__ = ()
  355. @staticmethod
  356. def upload_video(base_dir, env, request_id, aiFilePath):
  357. aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
  358. upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, aiFilePath, "ai_online_%s" % request_id)
  359. upload_video_thread_ai.setDaemon(True)
  360. upload_video_thread_ai.start()
  361. ai_url = upload_video_thread_ai.get_result()
  362. return ai_url
  363. @staticmethod
  364. def ai_normal_dtection(model, frame, request_id):
  365. model_conf, code = model
  366. retResults = MODEL_CONFIG[code][3]([model_conf, frame, request_id])[0]
  367. # [float(cls_c), xc,yc,w,h, float(conf_c)]
  368. return code, retResults[2]
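# obj_det below mirrors OnlineIntelligentRecognitionProcess.obj_det: per-frame detection while a
# target is flagged, otherwise sampling every 30th frame.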
  369. @staticmethod
  370. def obj_det(self, model_array, frame, task_status, cframe, tt, request_id):
  371. push_obj = []
  372. if task_status[1] == 1:
  373. dtection_result = []
  374. for model in model_array:
  375. result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
  376. dtection_result.append(result)
  377. for d in dtection_result:
  378. code, det_r = d.result()
  379. if len(det_r) > 0:
  380. push_obj.append((code, det_r))
  381. if len(push_obj) == 0:
  382. task_status[1] = 0
  383. if task_status[1] == 0:
  384. if cframe % 30 == 0:
  385. dtection_result1 = []
  386. for model in model_array:
  387. result = tt.submit(self.ai_normal_dtection, model, frame, request_id)
  388. dtection_result1.append(result)
  389. for d in dtection_result1:
  390. code, det_r = d.result()
  391. if len(det_r) > 0:
  392. push_obj.append((code, det_r))
  393. if len(push_obj) > 0:
  394. task_status[1] = 1
  395. return push_obj
  396. @staticmethod
  397. def start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context):
  398. pushProcess = OffPushStreamProcess(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
  399. pushProcess.daemon = True
  400. pushProcess.start()
  401. return pushProcess
  402. @staticmethod
  403. def start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, frame_num):
  404. pullProcess = OfflinePullVideoStreamProcess(msg, context, fb_queue, pull_queue, image_queue, analyse_type,
  405. frame_num)
  406. pullProcess.daemon = True
  407. pullProcess.start()
  408. return pullProcess
  409. @staticmethod
  410. def checkPT(service_timeout, start_time, pull_process, push_process, hb_thread, push_ex_queue, pull_queue,
  411. request_id):
  412. if time() - start_time > service_timeout:
  413. logger.error("任务执行超时, requestId: {}", request_id)
  414. raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
  415. ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
  416. if pull_process is not None and not pull_process.is_alive():
  417. while True:
  418. if pull_queue.empty() or pull_queue.qsize() == 0:
  419. break
  420. pull_result = get_no_block_queue(pull_queue)
  421. if pull_result is not None and pull_result[0] == 1:
  422. raise ServiceException(pull_result[1], pull_result[2])
  423. logger.info("拉流进程异常停止, requestId: {}", request_id)
  424. raise Exception("拉流进程异常停止!")
  425. if hb_thread is not None and not hb_thread.is_alive():
  426. logger.info("心跳线程异常停止, requestId: {}", request_id)
  427. raise Exception("心跳线程异常停止!")
  428. if push_process is not None and not push_process.is_alive():
  429. while True:
  430. if push_ex_queue.empty() or push_ex_queue.qsize() == 0:
  431. break
  432. push_result = get_no_block_queue(push_ex_queue)
  433. if push_result is not None and push_result[0] == 1:
  434. raise ServiceException(push_result[1], push_result[2])
  435. logger.info("推流进程异常停止, requestId: {}", request_id)
  436. raise Exception("推流进程异常停止!")
  437. def run(self):
  438. msg, context, analyse_type, ex = self._msg, self._context, self._analyse_type, None
  439. self.build_video_path(context, msg, is_build_or=False)
  440. request_id, base_dir, env = msg["request_id"], context["base_dir"], context["env"]
  441. # 拉流进程、推流进程、心跳线程
  442. pull_process, push_process, hb_thread = None, None, None
  443. service_timeout = int(context["service"]["timeout"])
  444. # 事件队列、拉流队列、心跳队列、反馈队列
  445. event_queue, pull_queue, hb_queue, fb_queue = self.event_queue, self._pull_queue, self._hb_queue, self._fb_queue
  446. # 推流队列、推流异常队列、图片队列
  447. push_queue, push_ex_queue, image_queue = self._push_queue, self._push_ex_queue, self._image_queue
  448. try:
  449. # 初始化日志
  450. init_log(base_dir, env)
  451. # 打印启动日志
  452. logger.info("开始启动离线分析进程!requestId: {}", request_id)
  453. # 启动拉流进程(包含拉流线程, 图片上传线程)
  454. # 拉流进程初始化时间长, 先启动
  455. pull_process = self.start_pull_stream(msg, context, fb_queue, pull_queue, image_queue, analyse_type, 25)
  456. # 启动心跳线程
  457. hb_thread = self.start_heartbeat(fb_queue, hb_queue, request_id, analyse_type, context)
  458. # 加载算法模型
  459. model_array = get_model(msg, context, analyse_type)
  460. # 启动推流进程
  461. push_process = self.start_push_stream(msg, push_queue, image_queue, push_ex_queue, hb_queue, context)
  462. # 第一个参数: 模型是否初始化 0:未初始化 1:初始化
  463. # 第二个参数: 检测是否有问题 0: 没有问题, 1: 有问题
  464. task_status = [0, 0]
  465. draw_config = {}
  466. start_time = time()
  467. # 识别2个线程性能最优
  468. with ThreadPoolExecutor(max_workers=2) as t:
  469. # 可能使用模型组合, 模型组合最多3个模型, 1对3, 上面的2个线程对应6个线程
  470. with ThreadPoolExecutor(max_workers=6) as tt:
  471. while True:
  472. # 检查拉流进程是否正常, 心跳线程是否正常
  473. self.checkPT(service_timeout, start_time, pull_process, push_process, hb_thread, push_ex_queue,
  474. pull_queue, request_id)
  475. # 检查推流是否异常
  476. push_status = get_no_block_queue(push_ex_queue)
  477. if push_status is not None and push_status[0] == 1:
  478. raise ServiceException(push_status[1], push_status[2])
  479. # 获取停止指令
  480. event_result = get_no_block_queue(event_queue)
  481. if event_result:
  482. cmdStr = event_result.get("command")
  483. # 接收到停止指令
  484. if "stop" == cmdStr:
  485. logger.info("离线任务开始停止, requestId: {}", request_id)
  486. pull_process.sendCommand({"command": 'stop'})
  487. pull_result = get_no_block_queue(pull_queue)
  488. if pull_result is None:
  489. sleep(1)
  490. continue
  491. # (4, (frame_list, frame_index_list, all_frames))
  492. if pull_result[0] == 4:
  493. frame_list, frame_index_list, all_frames = pull_result[1]
  494. if len(frame_list) > 0:
  495. # 判断是否已经初始化
  496. if task_status[0] == 0:
  497. task_status[0] = 1
  498. for i, model in enumerate(model_array):
  499. model_conf, code = model
  500. model_param = model_conf[1]
  501. # (modeType, model_param, allowedList, names, rainbows)
  502. MODEL_CONFIG[code][2](frame_list[0].shape[1], frame_list[0].shape[0],
  503. model_conf)
  504. if draw_config.get("font_config") is None:
  505. draw_config["font_config"] = model_param['font_config']
  506. if draw_config.get(code) is None:
  507. draw_config[code] = {}
  508. draw_config[code]["allowedList"] = model_conf[2]
  509. draw_config[code]["rainbows"] = model_conf[4]
  510. draw_config[code]["label_arrays"] = model_param['label_arraylist']
  511. det_array = []
  512. for i, frame in enumerate(frame_list):
  513. det_result = t.submit(self.obj_det, self, model_array, frame, task_status,
  514. frame_index_list[i], tt, request_id)
  515. det_array.append(det_result)
  516. push_objs = [det.result() for det in det_array]
  517. put_queue(push_queue,
  518. (1, (frame_list, frame_index_list, all_frames, draw_config, push_objs)),
  519. timeout=2, is_ex=True)
  520. del det_array, push_objs
  521. del frame_list, frame_index_list, all_frames
  522. elif pull_result[0] == 1:
  523. put_queue(push_queue, (2, 'stop_ex'), timeout=1, is_ex=True)
  524. logger.info("关闭推流进程, requestId:{}", request_id)
  525. push_process.join(timeout=120)
  526. logger.info("关闭推流进程1, requestId:{}", request_id)
  527. raise ServiceException(pull_result[1], pull_result[2])
  528. elif pull_result[0] == 2:
  529. logger.info("离线任务开始停止, requestId: {}", request_id)
  530. put_queue(push_queue, (2, 'stop'), timeout=1, is_ex=True)
  531. push_process.join(120)
  532. pull_process.sendCommand({"command": 'stop'})
  533. pull_process.join(120)
  534. break
  535. else:
  536. raise Exception("未知拉流状态异常!")
  537. except ServiceException as s:
  538. logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
  539. ex = s.code, s.msg
  540. except Exception:
  541. logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
  542. ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
  543. finally:
  544. base_dir, env, aiFilePath = context["base_dir"], context["env"], context["aiFilePath"]
  545. ai_url, exc = "", None
  546. try:
  547. if push_process and push_process.is_alive():
  548. put_queue(push_queue, (2, 'stop_ex'), timeout=1)
  549. push_process.join(timeout=120)
  550. if pull_process and pull_process.is_alive():
  551. pull_process.sendCommand({"command": 'stop_ex'})
  552. pull_process.sendCommand({"command": 'stop'})
  553. pull_process.join(timeout=120)
  554. if exists(aiFilePath) and getsize(aiFilePath) > 100:
  555. ai_url = self.upload_video(base_dir, env, request_id, aiFilePath)
  556. if ai_url is None:
  557. logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
  558. raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
  559. ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
  560. # 停止心跳线程
  561. if hb_thread and hb_thread.is_alive():
  562. put_queue(hb_queue, {"command": "stop"}, timeout=1)
  563. hb_thread.join(timeout=120)
  564. if exists(aiFilePath):
  565. logger.info("开始删除AI视频, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
  566. os.remove(aiFilePath)
  567. logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", aiFilePath, request_id)
  568. # 如果有异常, 检查是否有原视频和AI视频,有则上传,响应失败
  569. if ex:
  570. code, msg = ex
  571. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
  572. analyse_type,
  573. error_code=code,
  574. error_msg=msg,
  575. ai_video_url=ai_url), timeout=2, is_ex=False)
  576. else:
  577. if ai_url is None or len(ai_url) == 0:
  578. raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
  579. ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
  580. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.SUCCESS.value,
  581. analyse_type,
  582. progress=success_progess,
  583. ai_video_url=ai_url), timeout=2, is_ex=False)
  584. except ServiceException as s:
  585. logger.exception("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, request_id)
  586. exc = s.code, s.msg
  587. except Exception:
  588. logger.error("服务异常: {}, requestId: {},", format_exc(), request_id)
  589. exc = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
  590. finally:
  591. if push_process and push_process.is_alive():
  592. put_queue(push_queue, (2, 'stop_ex'), timeout=1)
  593. push_process.join(timeout=120)
  594. if pull_process and pull_process.is_alive():
  595. pull_process.sendCommand({"command": 'stop_ex'})
  596. pull_process.sendCommand({"command": 'stop'})
  597. pull_process.join(timeout=120)
  598. if hb_thread and hb_thread.is_alive():
  599. put_queue(hb_queue, {"command": "stop"}, timeout=1)
  600. hb_thread.join(timeout=120)
  601. if exc:
  602. code, msg = exc
  603. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
  604. analyse_type,
  605. error_code=code,
  606. error_msg=msg,
  607. ai_video_url=ai_url), timeout=2, is_ex=False)
  608. self.clear_queue()
609. '''
610. Image recognition
611. '''
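# PhotosIntelligentRecognitionProcess dispatches every configured model over the list of image
# URLs: Baidu models go through baiduRecognition, epidemic-prevention and plate models through
# epidemicPrevention, and all other models through publicIdentification. Detection results are
# handed to an ImageTypeImageFileUpload thread via image_queue and fed back on fb_queue.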
  612. class PhotosIntelligentRecognitionProcess(Process):
  613. __slots__ = ("_fb_queue", "_msg", "_analyse_type", "_context", "_image_queue")
  614. def __init__(self, *args):
  615. super().__init__()
  616. self._fb_queue, self._msg, self._analyse_type, self._context = args
  617. self._image_queue = Queue()
  618. put_queue(self._fb_queue, message_feedback(self._msg["request_id"], AnalysisStatus.WAITING.value,
  619. self._analyse_type, progress=init_progess), timeout=2, is_ex=True)
  620. self.build_logo(self._msg, self._context)
  621. @staticmethod
  622. def build_logo(msg, context):
  623. logo = None
  624. if context["video"]["video_add_water"]:
  625. logo = msg.get("logo_url")
  626. if logo is not None and len(logo) > 0:
  627. logo = url2Array(logo, enable_ex=False)
  628. if logo is None:
  629. logo = cv2.imread(join(context['base_dir'], "image/logo.png"), -1)
  630. context['logo'] = logo
  631. def epidemic_prevention(self, imageUrl, model, orc, request_id):
  632. try:
  633. # modeType, allowedList, new_device, model, par, img_type
  634. model_conf, code = model
  635. modeType, allowedList, new_device, model, par, img_type = model_conf
  636. image = url2Array(imageUrl)
  637. param = [image, new_device, model, par, img_type, request_id]
  638. dataBack = MODEL_CONFIG[code][3](param)
  639. if img_type == 'plate':
  640. carCode = ''
  641. if dataBack is None or dataBack.get("plateImage") is None or len(dataBack.get("plateImage")) == 0:
  642. result = orc.license_plate_recognition(image, request_id)
  643. score = ''
  644. if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
  645. logger.error("车牌识别为空: {}", result)
  646. carCode = ''
  647. else:
  648. for word in result.get("words_result"):
  649. if word is not None and word.get("number") is not None:
  650. if len(carCode) == 0:
  651. carCode = word.get("number")
  652. else:
  653. carCode = carCode + "," + word.get("number")
  654. else:
  655. result = orc.license_plate_recognition(dataBack.get("plateImage")[0], request_id)
  656. score = dataBack.get("plateImage")[1]
  657. if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
  658. result = orc.license_plate_recognition(image, request_id)
  659. if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
  660. logger.error("车牌识别为空: {}", result)
  661. carCode = ''
  662. else:
  663. for word in result.get("words_result"):
  664. if word is not None and word.get("number") is not None:
  665. if len(carCode) == 0:
  666. carCode = word.get("number")
  667. else:
  668. carCode = carCode + "," + word.get("number")
  669. else:
  670. for word in result.get("words_result"):
  671. if word is not None and word.get("number") is not None:
  672. if len(carCode) == 0:
  673. carCode = word.get("number")
  674. else:
  675. carCode = carCode + "," + word.get("number")
  676. if len(carCode) > 0:
  677. plate_result = {'type': str(3), 'modelCode': code, 'carUrl': imageUrl,
  678. 'carCode': carCode,
  679. 'score': score}
  680. put_queue(self._fb_queue, message_feedback(request_id,
  681. AnalysisStatus.RUNNING.value,
  682. AnalysisType.IMAGE.value, "", "",
  683. '',
  684. imageUrl,
  685. imageUrl,
  686. str(code),
  687. str(3),
  688. plate_result), timeout=2, is_ex=True)
  689. if img_type == 'code':
  690. if dataBack is None or dataBack.get("type") is None:
  691. return
  692. # 行程码
  693. if dataBack.get("type") == 1 and 1 in allowedList:
  694. # 手机号
  695. if dataBack.get("phoneNumberImage") is None or len(dataBack.get("phoneNumberImage")) == 0:
  696. phoneNumberRecognition = ''
  697. phone_score = ''
  698. else:
  699. phone = orc.universal_text_recognition(dataBack.get("phoneNumberImage")[0], request_id)
  700. phone_score = dataBack.get("phoneNumberImage")[1]
  701. if phone is None or phone.get("words_result") is None or len(phone.get("words_result")) == 0:
  702. logger.error("手机号识别为空: {}", phone)
  703. phoneNumberRecognition = ''
  704. else:
  705. phoneNumberRecognition = phone.get("words_result")
  706. if dataBack.get("cityImage") is None or len(dataBack.get("cityImage")) == 0:
  707. cityRecognition = ''
  708. city_score = ''
  709. else:
  710. city = orc.universal_text_recognition(dataBack.get("cityImage")[0], request_id)
  711. city_score = dataBack.get("cityImage")[1]
  712. if city is None or city.get("words_result") is None or len(city.get("words_result")) == 0:
  713. logger.error("城市识别为空: {}", city)
  714. cityRecognition = ''
  715. else:
  716. cityRecognition = city.get("words_result")
  717. if len(phoneNumberRecognition) > 0 or len(cityRecognition) > 0:
  718. trip_result = {'type': str(1),
  719. 'modelCode': code,
  720. 'imageUrl': imageUrl,
  721. 'phoneNumberRecognition': phoneNumberRecognition,
  722. 'phone_sorce': phone_score,
  723. 'cityRecognition': cityRecognition,
  724. 'city_score': city_score}
  725. put_queue(self._fb_queue, message_feedback(request_id,
  726. AnalysisStatus.RUNNING.value,
  727. AnalysisType.IMAGE.value, "", "",
  728. '',
  729. imageUrl,
  730. imageUrl,
  731. str(code),
  732. str(1),
  733. trip_result), timeout=2, is_ex=True)
  734. if dataBack.get("type") == 2 and 2 in allowedList:
  735. if dataBack.get("nameImage") is None or len(dataBack.get("nameImage")) == 0:
  736. nameRecognition = ''
  737. name_score = ''
  738. else:
  739. name = orc.universal_text_recognition(dataBack.get("nameImage")[0], request_id)
  740. name_score = dataBack.get("nameImage")[1]
  741. if name is None or name.get("words_result") is None or len(name.get("words_result")) == 0:
  742. logger.error("名字识别为空: {}", name)
  743. nameRecognition = ''
  744. else:
  745. nameRecognition = name.get("words_result")
  746. if dataBack.get("phoneNumberImage") is None or len(dataBack.get("phoneNumberImage")) == 0:
  747. phoneNumberRecognition = ''
  748. phone_score = ''
  749. else:
  750. phone = orc.universal_text_recognition(dataBack.get("phoneNumberImage")[0], request_id)
  751. phone_score = dataBack.get("phoneNumberImage")[1]
  752. if phone is None or phone.get("words_result") is None or len(phone.get("words_result")) == 0:
  753. logger.error("手机号识别为空: {}", phone)
  754. phoneNumberRecognition = ''
  755. else:
  756. phoneNumberRecognition = phone.get("words_result")
  757. if dataBack.get("hsImage") is None or len(dataBack.get("hsImage")) == 0:
  758. hsRecognition = ''
  759. hs_score = ''
  760. else:
  761. hs = orc.universal_text_recognition(dataBack.get("hsImage")[0], request_id)
  762. hs_score = dataBack.get("hsImage")[1]
  763. if hs is None or hs.get("words_result") is None or len(hs.get("words_result")) == 0:
  764. logger.error("核酸识别为空: {}", hs)
  765. hsRecognition = ''
  766. else:
  767. hsRecognition = hs.get("words_result")
  768. if len(nameRecognition) > 0 or len(phoneNumberRecognition) > 0 or len(hsRecognition) > 0:
  769. healthy_result = {'type': str(2),
  770. 'modelCode': code,
  771. 'imageUrl': imageUrl,
  772. 'color': dataBack.get("color"),
  773. 'nameRecognition': nameRecognition,
  774. 'name_score': name_score,
  775. 'phoneNumberRecognition': phoneNumberRecognition,
  776. 'phone_score': phone_score,
  777. 'hsRecognition': hsRecognition,
  778. 'hs_score': hs_score}
  779. put_queue(self._fb_queue, message_feedback(request_id,
  780. AnalysisStatus.RUNNING.value,
  781. AnalysisType.IMAGE.value, "", "",
  782. '',
  783. imageUrl,
  784. imageUrl,
  785. str(code),
  786. str(2),
  787. healthy_result), timeout=2, is_ex=True)
  788. except ServiceException as s:
  789. raise s
  790. except Exception as e:
  791. logger.error("模型分析异常: {}, requestId: {}", format_exc(), request_id)
  792. raise e
793. '''
794. Epidemic prevention model
795. '''
  796. def epidemicPrevention(self, imageUrls, model, base_dir, env, request_id):
  797. with ThreadPoolExecutor(max_workers=2) as t:
  798. orc = OcrBaiduSdk(base_dir, env)
  799. obj_list = []
  800. for imageUrl in imageUrls:
  801. obj = t.submit(self.epidemic_prevention, imageUrl, model, orc, request_id)
  802. obj_list.append(obj)
  803. for r in obj_list:
  804. r.result(60)
  805. def image_recognition(self, imageUrl, mod, image_queue, logo, request_id):
  806. try:
  807. model_conf, code = mod
  808. model_param = model_conf[1]
  809. image = url2Array(imageUrl)
  810. MODEL_CONFIG[code][2](image.shape[1], image.shape[0], model_conf)
  811. p_result = MODEL_CONFIG[code][3]([model_conf, image, request_id])[0]
  812. if p_result is None or len(p_result) < 3 or p_result[2] is None or len(p_result[2]) == 0:
  813. return
814. if logo is not None:
  815. image = add_water_pic(image, logo, request_id)
  816. # (modeType, model_param, allowedList, names, rainbows)
  817. allowedList = model_conf[2]
  818. label_arraylist = model_param['label_arraylist']
  819. font_config = model_param['font_config']
  820. rainbows = model_conf[4]
  821. det_xywh = {code: {}}
  822. ai_result_list = p_result[2]
  823. for ai_result in ai_result_list:
  824. box, score, cls = xywh2xyxy2(ai_result)
  825. # 如果检测目标在识别任务中,继续处理
  826. if cls in allowedList:
  827. label_array = label_arraylist[cls]
  828. color = rainbows[cls]
  829. cd = det_xywh[code].get(cls)
  830. if cd is None:
  831. det_xywh[code][cls] = [[cls, box, score, label_array, color]]
  832. else:
  833. det_xywh[code][cls].append([cls, box, score, label_array, color])
834. if len(det_xywh[code]) > 0:
  835. put_queue(image_queue, (1, (det_xywh, imageUrl, image, font_config, "")), timeout=2, is_ex=False)
  836. except ServiceException as s:
  837. raise s
  838. except Exception as e:
  839. logger.error("模型分析异常: {}, requestId: {}", format_exc(), self._msg.get("request_id"))
  840. raise e
  841. def publicIdentification(self, imageUrls, mod, image_queue, logo, request_id):
  842. with ThreadPoolExecutor(max_workers=2) as t:
  843. obj_list = []
  844. for imageUrl in imageUrls:
  845. obj = t.submit(self.image_recognition, imageUrl, mod, image_queue, logo, request_id)
  846. obj_list.append(obj)
  847. for r in obj_list:
  848. r.result(60)
849. '''
850. 1. imageUrls: array of image URLs (multiple images)
851. 2. mod: model object
852. 3. image_queue: image queue
853. '''
  854. def baiduRecognition(self, imageUrls, mod, image_queue, logo, request_id):
  855. with ThreadPoolExecutor(max_workers=2) as t:
  856. thread_result = []
  857. for imageUrl in imageUrls:
  858. obj = t.submit(self.baidu_recognition, imageUrl, mod, image_queue, logo, request_id)
  859. thread_result.append(obj)
  860. for r in thread_result:
  861. r.result(60)
  862. def baidu_recognition(self, imageUrl, mod, image_queue, logo, request_id):
  863. with ThreadPoolExecutor(max_workers=2) as t:
  864. try:
  865. # modeType, aipImageClassifyClient, aipBodyAnalysisClient, allowedList, rainbows,
  866. # vehicle_names, person_names, requestId
  867. model_conf, code = mod
  868. allowedList = model_conf[3]
  869. rainbows = model_conf[4]
  870. # 图片转数组
  871. img = url2Array(imageUrl)
  872. vehicle_label_arrays, person_label_arrays, font_config = MODEL_CONFIG[code][2](img.shape[1],
  873. img.shape[0],
  874. model_conf)
  875. obj_list = []
  876. for target in allowedList:
  877. parm = [target, imageUrl, model_conf[1], model_conf[2], request_id]
878. result = t.submit(self.baidu_method, code, parm, img, image_queue, vehicle_label_arrays,
879. person_label_arrays, font_config, rainbows, logo)
880. obj_list.append(result)
  881. for r in obj_list:
  882. r.result(60)
  883. except ServiceException as s:
  884. raise s
  885. except Exception as e:
  886. logger.error("百度AI分析异常: {}, requestId: {}", format_exc(), request_id)
  887. raise e
  888. @staticmethod
  889. def baidu_method(code, parm, img, image_queue, vehicle_label_arrays, person_label_arrays, font_config,
  890. rainbows, logo):
  891. # [target, url, aipImageClassifyClient, aipBodyAnalysisClient, requestId]
  892. request_id = parm[4]
  893. target = parm[0]
  894. image_url = parm[1]
  895. result = MODEL_CONFIG[code][3](parm)
  896. if target == BaiduModelTarget.VEHICLE_DETECTION.value[1] and result is not None:
  897. vehicleInfo = result.get("vehicle_info")
  898. if vehicleInfo is not None and len(vehicleInfo) > 0:
  899. det_xywh = {code: {}}
  900. copy_frame = img.copy()
  901. for i, info in enumerate(vehicleInfo):
902. value = VehicleEnumVALUE.get(info.get("type"))
903. if value is None:
904.     logger.error("车辆识别出现未支持的目标类型!type:{}, requestId:{}", info.get("type"), request_id)
905.     return
906. target_num = value.value[2]
907. label_array = vehicle_label_arrays[target_num]
908. color = rainbows[target_num]
  909. left_top = (int(info.get("location").get("left")), int(info.get("location").get("top")))
  910. right_top = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
  911. int(info.get("location").get("top")))
  912. right_bottom = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
  913. int(info.get("location").get("top")) + int(info.get("location").get("height")))
  914. left_bottom = (int(info.get("location").get("left")),
  915. int(info.get("location").get("top")) + int(info.get("location").get("height")))
  916. box = [left_top, right_top, right_bottom, left_bottom]
  917. score = float("%.2f" % info.get("probability"))
918. if logo is not None:
  919. copy_frame = add_water_pic(copy_frame, logo, request_id)
  920. if det_xywh[code].get(target) is None:
  921. det_xywh[code][target] = [[target, box, score, label_array, color]]
  922. else:
  923. det_xywh[code][target].append([target, box, score, label_array, color])
  924. info["id"] = str(i)
  925. if len(det_xywh[code]) > 0:
  926. result["type"] = str(target)
  927. result["modelCode"] = code
  928. put_queue(image_queue, (1, (det_xywh, image_url, copy_frame, font_config, result)), timeout=2,
  929. is_ex=True)
  930. # 人体识别
  931. if target == BaiduModelTarget.HUMAN_DETECTION.value[1] and result is not None:
  932. personInfo = result.get("person_info")
  933. personNum = result.get("person_num")
  934. if personNum is not None and personNum > 0 and personInfo is not None and len(personInfo) > 0:
  935. det_xywh = {code: {}}
  936. copy_frame = img.copy()
  937. for i, info in enumerate(personInfo):
  938. left_top = (int(info.get("location").get("left")), int(info.get("location").get("top")))
  939. right_top = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
  940. int(info.get("location").get("top")))
  941. right_bottom = (int(info.get("location").get("left")) + int(info.get("location").get("width")),
  942. int(info.get("location").get("top")) + int(info.get("location").get("height")))
  943. left_bottom = (int(info.get("location").get("left")),
  944. int(info.get("location").get("top")) + int(info.get("location").get("height")))
  945. box = [left_top, right_top, right_bottom, left_bottom]
  946. score = float("%.2f" % info.get("location").get("score"))
  947. label_array = person_label_arrays[0]
  948. color = rainbows[0]
949. if logo is not None:
  950. copy_frame = add_water_pic(copy_frame, logo, request_id)
  951. if det_xywh[code].get(target) is None:
  952. det_xywh[code][target] = [[target, box, score, label_array, color]]
  953. else:
  954. det_xywh[code][target].append([target, box, score, label_array, color])
  955. info["id"] = str(i)
  956. if len(det_xywh[code]) > 0:
  957. result["type"] = str(target)
  958. result["modelCode"] = code
  959. put_queue(image_queue, (1, (det_xywh, image_url, copy_frame, font_config, result)), timeout=2)
  960. # 人流量
  961. if target == BaiduModelTarget.PEOPLE_COUNTING.value[1] and result is not None:
  962. base64Image = result.get("image")
  963. if base64Image is not None and len(base64Image) > 0:
  964. baiduImage = base64.b64decode(base64Image)
  965. result["type"] = str(target)
  966. result["modelCode"] = code
  967. del result["image"]
  968. put_queue(image_queue, (1, (None, image_url, baiduImage, None, result)), timeout=2)
  969. @staticmethod
  970. def start_File_upload(fb_queue, context, msg, image_queue, analyse_type):
  971. image_thread = ImageTypeImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
  972. image_thread.setDaemon(True)
  973. image_thread.start()
  974. return image_thread
  975. def run(self):
  976. fb_queue, msg, analyse_type, context = self._fb_queue, self._msg, self._analyse_type, self._context
  977. request_id, logo, image_queue = msg["request_id"], context['logo'], self._image_queue
  978. base_dir, env = context["base_dir"], context["env"]
  979. imageUrls = msg["image_urls"]
  980. image_thread = None
  981. with ThreadPoolExecutor(max_workers=2) as t:
  982. try:
  983. init_log(base_dir, env)
  984. logger.info("开始启动图片识别进程, requestId: {}", request_id)
  985. model_array = get_model(msg, context, analyse_type)
  986. image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
  987. task_list = []
  988. for model in model_array:
  989. # 百度模型逻辑
  990. if model[1] == ModelType.BAIDU_MODEL.value[1]:
  991. result = t.submit(self.baiduRecognition, imageUrls, model, image_queue, logo, request_id)
  992. task_list.append(result)
  993. # 防疫模型
  994. elif model[1] == ModelType.EPIDEMIC_PREVENTION_MODEL.value[1]:
  995. result = t.submit(self.epidemicPrevention, imageUrls, model, base_dir, env, request_id)
  996. task_list.append(result)
  997. # 车牌模型
  998. elif model[1] == ModelType.PLATE_MODEL.value[1]:
  999. result = t.submit(self.epidemicPrevention, imageUrls, model, base_dir, env, request_id)
  1000. task_list.append(result)
  1001. else:
  1002. result = t.submit(self.publicIdentification, imageUrls, model, image_queue, logo, request_id)
  1003. task_list.append(result)
  1004. for r in task_list:
  1005. r.result(60)
  1006. if image_thread and not image_thread.is_alive():
  1007. raise Exception("图片识别图片上传线程异常停止!!!")
  1008. if image_thread and image_thread.is_alive():
  1009. put_queue(image_queue, (2, 'stop'), timeout=2)
  1010. image_thread.join(120)
  1011. logger.info("图片进程任务完成,requestId:{}", request_id)
  1012. put_queue(fb_queue, message_feedback(request_id,
  1013. AnalysisStatus.SUCCESS.value,
  1014. analyse_type,
  1015. progress=success_progess), timeout=2, is_ex=True)
  1016. except ServiceException as s:
  1017. logger.error("图片分析异常,异常编号:{}, 异常描述:{}, requestId:{}", s.code, s.msg, request_id)
  1018. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
  1019. analyse_type,
  1020. s.code,
  1021. s.msg), timeout=2)
  1022. except Exception:
  1023. logger.error("图片分析异常: {}, requestId:{}", format_exc(), request_id)
  1024. put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
  1025. analyse_type,
  1026. ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
  1027. ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), timeout=2)
  1028. finally:
  1029. if image_thread and image_thread.is_alive():
  1030. clear_queue(image_queue)
  1031. put_queue(image_queue, (2, 'stop'), timeout=2)
  1032. image_thread.join(120)
  1033. clear_queue(image_queue)
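# ScreenRecordingProcess: records the pulled stream to an mp4 file with write_or_video, optionally
# re-pushes the frames to push_url via push_video_stream, reports progress through a
# RecordingHeartbeat thread, and uploads the recording to VOD once the task stops.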
  1034. class ScreenRecordingProcess(Process):
1035. __slots__ = ('_fb_queue', '_context', '_msg', '_analysisType', '_event_queue', '_hb_queue', '_pull_queue')
  1036. def __init__(self, *args):
  1037. super().__init__()
  1038. # 传参
  1039. self._fb_queue, self._context, self._msg, self._analysisType = args
  1040. self._event_queue, self._hb_queue, self._pull_queue = Queue(), Queue(), Queue(10)
  1041. put_queue(self._fb_queue,
  1042. recording_feedback(self._msg["request_id"], RecordingStatus.RECORDING_WAITING.value[0]),
  1043. timeout=1, is_ex=True)
  1044. def sendEvent(self, result):
  1045. put_queue(self._event_queue, result, timeout=2, is_ex=True)
  1046. @staticmethod
  1047. def start_pull_stream_thread(msg, context, pull_queue, hb_queue, fb_queue, frame_num):
  1048. pullThread = RecordingPullStreamThread(msg, context, pull_queue, hb_queue, fb_queue, frame_num)
  1049. pullThread.setDaemon(True)
  1050. pullThread.start()
  1051. return pullThread
  1052. @staticmethod
  1053. def start_hb_thread(fb_queue, hb_queue, request_id):
  1054. hb = RecordingHeartbeat(fb_queue, hb_queue, request_id)
  1055. hb.setDaemon(True)
  1056. hb.start()
  1057. return hb
  1058. @staticmethod
  1059. def check(start_time, service_timeout, pull_thread, hb_thread, request_id):
  1060. if time() - start_time > service_timeout:
  1061. logger.error("录屏超时, requestId: {}", request_id)
  1062. raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
  1063. ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
  1064. if pull_thread and not pull_thread.is_alive():
  1065. logger.info("录屏拉流线程停止异常, requestId: {}", request_id)
  1066. raise Exception("录屏拉流线程异常停止")
  1067. if hb_thread and not hb_thread.is_alive():
  1068. logger.info("录屏心跳线程异常停止, requestId: {}", request_id)
  1069. raise Exception("录屏心跳线程异常停止")
  1070. def run(self):
  1071. msg, context = self._msg, self._context
  1072. request_id, push_url = msg['request_id'], msg.get('push_url')
  1073. pull_queue, fb_queue, hb_queue, event_queue = self._pull_queue, self._fb_queue, self._hb_queue, \
  1074. self._event_queue
  1075. base_dir, env, service_timeout = context['base_dir'], context['env'], int(context["service"]["timeout"])
  1076. pre_path, end_path = '%s/%s%s' % (base_dir, context["video"]["file_path"], now_date_to_str(YMDHMSF)), \
  1077. '%s%s' % (request_id, ".mp4")
  1078. orFilePath = '%s%s%s' % (pre_path, "_on_or_", end_path)
  1079. pull_thread, hb_thread = None, None
  1080. or_write_status, p_push_status = [0, 0], [0, 0]
  1081. or_video_file, push_p = None, None
  1082. ex = None
  1083. try:
  1084. # 初始化日志
  1085. init_log(base_dir, env)
  1086. # 启动拉流线程
  1087. pull_thread = self.start_pull_stream_thread(msg, context, pull_queue, hb_queue, fb_queue, 25)
  1088. hb_thread = self.start_hb_thread(fb_queue, hb_queue, request_id)
  1089. start_time = time()
  1090. with ThreadPoolExecutor(max_workers=2) as t:
  1091. while True:
  1092. # 检查拉流线程和心跳线程
  1093. self.check(start_time, service_timeout, pull_thread, hb_thread, request_id)
  1094. # 判断是否需要停止录屏
  1095. event_result = get_no_block_queue(event_queue)
  1096. if event_result is not None:
  1097. cmdStr = event_result.get("command")
  1098. # 接收到停止指令
  1099. if 'stop' == cmdStr:
  1100. logger.info("录屏任务开始停止, requestId: {}", request_id)
  1101. pull_thread.sendEvent({"command": "stop"})
  1102. pull_result = get_no_block_queue(pull_queue)
  1103. if pull_result is None:
  1104. sleep(1)
  1105. continue
  1106. if pull_result[0] == 1:
  1107. close_all_p(push_p, or_video_file, None, request_id)
  1108. pull_thread.sendEvent({"command": "stop"})
  1109. pull_thread.join(180)
  1110. raise ServiceException(pull_result[1], pull_result[2])
  1111. elif pull_result[0] == 2:
  1112. close_all_p(push_p, or_video_file, None, request_id)
  1113. pull_thread.sendEvent({"command": "stop"})
  1114. pull_thread.join(180)
  1115. break
  1116. elif pull_result[0] == 4:
  1117. frame_list, frame_index_list, all_frames = pull_result[1]
  1118. if len(frame_list) > 0:
  1119. for i, frame in enumerate(frame_list):
  1120. if frame_index_list[i] % 300 == 0 and frame_index_list[i] < all_frames:
  1121. task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames))
  1122. put_queue(hb_queue, {"progress": task_process}, timeout=1)
  1123. write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
  1124. or_write_status, request_id)
1125. if push_url is not None and len(push_url) > 0:
1126.     push_p_result = t.submit(push_video_stream, frame, push_p, push_url, p_push_status,
1127.                              request_id)
1128.     push_p = push_p_result.result()
1129. or_video_file = write_or_video_result.result()
  1130. else:
  1131. raise Exception("未知拉流状态异常!")
  1132. logger.info("录屏线程任务完成,requestId:{}", self._msg.get("request_id"))
  1133. except ServiceException as s:
  1134. logger.error("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, self._msg.get("request_id"))
  1135. ex = s.code, s.msg
  1136. except Exception:
  1137. logger.error("服务异常: {}, requestId: {},", format_exc(), self._msg.get("request_id"))
  1138. ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
  1139. finally:
  1140. or_url = ""
  1141. exn = None
  1142. try:
  1143. # 关闭推流管道, 原视频写流客户端
  1144. close_all_p(push_p, or_video_file, None, request_id)
  1145. # 关闭拉流线程
  1146. if pull_thread and pull_thread.is_alive():
  1147. pull_thread.sendEvent({"command": "stop_ex"})
  1148. pull_thread.sendEvent({"command": "stop"})
  1149. pull_thread.join(120)
  1150. # 判断是否需要上传视频
  1151. if exists(orFilePath) and getsize(orFilePath) > 100:
  1152. or_url = self.upload_video(base_dir, env, request_id, orFilePath)
  1153. if or_url is None or len(or_url) == 0:
  1154. logger.error("原视频或AI视频播放上传VOD失败!, requestId: {}", request_id)
  1155. raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
  1156. ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
  1157. # 停止心跳线程
  1158. if hb_thread and hb_thread.is_alive():
  1159. put_queue(hb_queue, {"command": "stop"}, timeout=10, is_ex=False)
  1160. hb_thread.join(timeout=120)
  1161. if exists(orFilePath):
  1162. logger.info("开始删除原视频, orFilePath: {}, requestId: {}", orFilePath, request_id)
  1163. os.remove(orFilePath)
  1164. logger.info("删除原视频成功, orFilePath: {}, requestId: {}", orFilePath, request_id)
  1165. # 如果有异常, 检查是否有原视频和AI视频,有则上传,响应失败
  1166. if ex:
  1167. code, msg = ex
  1168. put_queue(fb_queue, recording_feedback(request_id, RecordingStatus.RECORDING_FAILED.value[0],
  1169. error_code=code,
  1170. error_msg=msg,
  1171. video_url=or_url), timeout=10, is_ex=False)
  1172. else:
  1173. if or_url is None or len(or_url) == 0:
  1174. raise ServiceException(ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[0],
  1175. ExceptionType.PUSH_STREAM_TIME_EXCEPTION.value[1])
  1176. put_queue(fb_queue, recording_feedback(request_id, RecordingStatus.RECORDING_SUCCESS.value[0],
  1177. progress=success_progess,
  1178. video_url=or_url), timeout=10, is_ex=False)
  1179. except ServiceException as s:
  1180. exn = s.code, s.msg
  1181. except Exception:
  1182. logger.error("异常:{}, requestId: {}", format_exc(), request_id)
  1183. exn = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
  1184. finally:
  1185. if pull_thread and pull_thread.is_alive():
  1186. pull_thread.sendEvent({"command": "stop"})
  1187. pull_thread.join(120)
  1188. if hb_thread and hb_thread.is_alive():
  1189. put_queue(hb_queue, {"command": "stop"}, timeout=10, is_ex=False)
  1190. hb_thread.join(timeout=120)
  1191. self.clear_queue_end()
  1192. if exn:
  1193. code, msg = exn
  1194. put_queue(fb_queue, recording_feedback(request_id, RecordingStatus.RECORDING_FAILED.value[0],
  1195. error_code=code,
  1196. error_msg=msg,
  1197. video_url=or_url), timeout=10, is_ex=False)
  1198. def clear_queue_end(self):
  1199. clear_queue(self._event_queue)
  1200. clear_queue(self._hb_queue)
  1201. clear_queue(self._pull_queue)
  1202. @staticmethod
  1203. def upload_video(base_dir, env, request_id, orFilePath):
  1204. aliyunVodSdk = ThAliyunVodSdk(base_dir, env, request_id)
  1205. upload_video_thread_ai = Common(aliyunVodSdk.get_play_url, orFilePath, "or_online_%s" % request_id)
  1206. upload_video_thread_ai.setDaemon(True)
  1207. upload_video_thread_ai.start()
  1208. or_url = upload_video_thread_ai.get_result()
  1209. return or_url
  1210. """
  1211. "models": [{
  1212. "code": "模型编号",
  1213. "categories":[{
  1214. "id": "模型id",
  1215. "config": {
  1216. "k1": "v1",
  1217. "k2": "v2"
  1218. }
  1219. }]
  1220. }]
  1221. """
  1222. def get_model(msg, context, analyse_type):
  1223. # 初始变量
  1224. request_id, base_dir, gpu_name, env = msg["request_id"], context["base_dir"], context["gpu_name"], context["env"]
  1225. models, model_num_limit = msg["models"], context["service"]["model"]['limit']
  1226. try:
  1227. # 实时、离线元组
  1228. analyse_type_tuple = (AnalysisType.ONLINE.value, AnalysisType.OFFLINE.value)
  1229. # (实时、离线)检查模型组合, 目前只支持3个模型组合
  1230. if analyse_type in analyse_type_tuple:
  1231. if len(models) > model_num_limit:
  1232. raise ServiceException(ExceptionType.MODEL_GROUP_LIMIT_EXCEPTION.value[0],
  1233. ExceptionType.MODEL_GROUP_LIMIT_EXCEPTION.value[1])
  1234. modelArray, codeArray = [], set()
  1235. for model in models:
  1236. # 模型编码
  1237. code = model["code"]
  1238. # 检验code是否重复
  1239. if code in codeArray:
  1240. raise ServiceException(ExceptionType.MODEL_DUPLICATE_EXCEPTION.value[0],
  1241. ExceptionType.MODEL_DUPLICATE_EXCEPTION.value[1])
  1242. codeArray.add(code)
  1243. # 检测目标数组
  1244. needed_objectsIndex = list(set([int(category["id"]) for category in model["categories"]]))
  1245. logger.info("模型编号: {}, 检查目标: {}, requestId: {}", code, needed_objectsIndex, request_id)
  1246. model_method = MODEL_CONFIG.get(code)
  1247. if model_method is None:
  1248. logger.error("未匹配到对应的模型, requestId:{}", request_id)
  1249. raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
  1250. ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1])
  1251. # 检查cpu资源、gpu资源
  1252. check_cpu(base_dir, request_id)
  1253. gpu_ids = check_gpu_resource(request_id)
  1254. # 如果实时识别、离线识别
  1255. if analyse_type in analyse_type_tuple:
  1256. if model["is_video"] == "1":
  1257. mod = model_method[0](gpu_ids[0], needed_objectsIndex, request_id, gpu_name, base_dir, env)
  1258. modelArray.append((mod.model_conf, code))
  1259. else:
  1260. raise ServiceException(ExceptionType.MODEL_NOT_SUPPORT_VIDEO_EXCEPTION.value[0],
  1261. ExceptionType.MODEL_NOT_SUPPORT_VIDEO_EXCEPTION.value[1],
  1262. model_method[1].value[2])
  1263. # 如果是图片识别
  1264. if analyse_type == AnalysisType.IMAGE.value:
  1265. if model["is_image"] == "1":
  1266. mod = model_method[0](gpu_ids[0], needed_objectsIndex, request_id, gpu_name, base_dir, env)
  1267. modelArray.append((mod.model_conf, code))
  1268. else:
  1269. raise ServiceException(ExceptionType.MODEL_NOT_SUPPORT_IMAGE_EXCEPTION.value[0],
  1270. ExceptionType.MODEL_NOT_SUPPORT_IMAGE_EXCEPTION.value[1],
  1271. model_method[1].value[2])
  1272. if len(modelArray) == 0:
  1273. raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
  1274. ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1])
  1275. return modelArray
  1276. except ServiceException as s:
  1277. raise s
  1278. except Exception:
  1279. logger.error("模型配置处理异常: {}, request_id: {}", format_exc(), request_id)
  1280. raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
  1281. ExceptionType.MODEL_LOADING_EXCEPTION.value[1])