
Algorithm interaction: remove multi-threaded model loading

pull/1/head
chenyukun, 2 years ago
commit f7fcd47737
1 changed file with 31 additions and 25 deletions
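
In both the online and offline recognition processes the commit makes the same change: the model is no longer built on a background daemon thread (the Common wrapper around get_model, whose result was fetched later with get_result() inside the frame loop) but by a single blocking get_model call before the loop starts. The sketch below only illustrates that trade-off with the standard library; load_model and its dummy arguments are stand-ins, not code from this repository.

import time
from concurrent.futures import ThreadPoolExecutor

def load_model(args):
    # Stand-in for get_model(): pretend the model takes a while to build.
    time.sleep(2)
    return "mod", "model_type_code", "modelConfig"

# Before this commit: start the load on a worker thread and fetch the result
# lazily, the first time the frame loop actually needs the model.
with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(load_model, ({}, "0", []))
    # ... open the stream, start the common / heartbeat processes ...
    mod, model_type_code, model_config = future.result()  # may block mid-loop

# After this commit: load synchronously up front, so the frame loop never has
# to check whether the model is ready.
mod, model_type_code, model_config = load_model(({}, "0", []))

The likely cost is a longer wait before the first frame is processed; in exchange, the per-frame "mod is None" check and initialization counter that this commit comments out are no longer needed.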

+31 -25  concurrency/IntelligentRecognitionProcess.py

@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
import json
import os
import time
import copy
@@ -97,12 +98,13 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
LogUtils.init_log(self.content)
# Program start time
start_time = time.time()
mod_thread = Common(None, func=get_model, args=(self.config, str(self.gpu_ids[0]), self.msg["models"]))
mod_thread.setDaemon(True)
mod_thread.start()
mod = None
model_type_code = None
modelConfig = None
mod, model_type_code, modelConfig = get_model((self.config, str(self.gpu_ids[0]), self.msg["models"]))
# mod_thread = Common(None, func=get_model, args=(self.config, str(self.gpu_ids[0]), self.msg["models"]))
# mod_thread.setDaemon(True)
# mod_thread.start()
# mod = None
# model_type_code = None
# modelConfig = None
# Start the common process (contains the image-upload thread, heartbeat thread, and issue-feedback thread)
commonProcess = CommonProcess(self.fbQueue, None, self.content, self.msg, self.imageQueue,
AnalysisType.ONLINE.value)
@@ -159,7 +161,7 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
pull_start_time = None
read_start_time = None
# Number of model initializations
model = 0
# model = 0
while True:
end_time = time.time()
create_task_time = end_time - start_time
@@ -210,10 +212,10 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
cv2tool.build_cv2()
continue
read_start_time = None
if mod is None and model == 0:
model += 1
logger.info("初始化模型: {}次, requestId: {}", model, self.msg.get("request_id"))
mod, model_type_code, modelConfig = mod_thread.get_result()
# if mod is None and model == 0:
# model += 1
# logger.info("初始化模型: {}次, requestId: {}", model, self.msg.get("request_id"))
# mod, model_type_code, modelConfig = mod_thread.get_result()
# time00 = time.time()
# Invoke the AI model
p_result, timeOut = mod.process(copy.deepcopy(frame), modelConfig)
@@ -344,12 +346,13 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
LogUtils.init_log(self.content)
# Program start time
start_time = time.time()
mod_thread = Common(None, func=get_model, args=(self.config, str(self.gpu_ids[0]), self.msg["models"]))
mod_thread.setDaemon(True)
mod_thread.start()
mod = None
model_type_code = None
modelConfig = None
mod, model_type_code, modelConfig = get_model((self.config, str(self.gpu_ids[0]), self.msg["models"]))
# mod_thread = Common(None, func=get_model, args=(self.config, str(self.gpu_ids[0]), self.msg["models"]))
# mod_thread.setDaemon(True)
# mod_thread.start()
# mod = None
# model_type_code = None
# modelConfig = None
# Create the heartbeat queue
hbQueue = Queue()
# Start the result-feedback process
@@ -375,7 +378,7 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
high_score_image = {}
step = int(self.content["service"]["frame_step"])
# Number of model initializations
model = 0
# model = 0
# Total number of video frames
all_f = None
if cv2tool.cap is not None:
@@ -422,10 +425,10 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
logger.info("任务开始结束分析, requestId: {}", self.msg.get("request_id"))
self.stop_task(cv2tool, aiFilePath, AnalysisStatus.SUCCESS.value)
break
if mod is None and model == 0:
model += 1
logger.info("初始化模型: {}次, requestId: {}", model, self.msg.get("request_id"))
mod, model_type_code, modelConfig = mod_thread.get_result()
# if mod is None and model == 0:
# model += 1
# logger.info("初始化模型: {}次, requestId: {}", model, self.msg.get("request_id"))
# mod, model_type_code, modelConfig = mod_thread.get_result()
# time00 = time.time()
# Invoke the AI model
p_result, timeOut = mod.process(copy.deepcopy(frame), modelConfig)
@@ -540,21 +543,24 @@ class PhotosIntelligentRecognitionProcess(IntelligentRecognitionProcess):


def get_model(args):
logger.info("######################开始加载模型######################")
for model in args[2]:
try:
code = model.get("code")
needed_objectsIndex = [int(category.get("id")) for category in model.get("categories")]
logger.info("code:{}, 检查目标:{}, gpuId:{}", code, needed_objectsIndex, args[1])
if code == ModelType.WATER_SURFACE_MODEL.value[1]:
return ModelUtils.SZModel(args[1], needed_objectsIndex), code, args[0].get("sz")
logger.info("######################加载河道模型######################")
mod, model_type_code, modelConfig = ModelUtils.SZModel(args[1], needed_objectsIndex), code, args[0].get("sz")
return mod, model_type_code, modelConfig
elif code == ModelType.FOREST_FARM_MODEL.value[1]:
logger.info("######################加载林场模型######################")
return ModelUtils.LCModel(args[1], needed_objectsIndex), code, args[0].get("lc")
else:
logger.error("未匹配到对应的模型")
raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1])
except Exception as e:
logger.error("获取模型配置异常:")
logger.exception(e)
logger.exception("获取模型配置异常: {}", e)
raise ServiceException(ExceptionType.AI_MODEL_CONFIG_EXCEPTION.value[0],
ExceptionType.AI_MODEL_CONFIG_EXCEPTION.value[1])
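
For reference, the argument tuple the reworked get_model() consumes can be pieced together from the fields it reads: args[0] is a config dict whose "sz" / "lc" sections become modelConfig, args[1] is the GPU id as a string, and args[2] is msg["models"]. The sketch below only mirrors those accesses; the concrete values are placeholders, not the project's real schema.

config = {"sz": {}, "lc": {}}      # args[0]: per-model config sections, returned as modelConfig
gpu_id = "0"                       # args[1]: str(self.gpu_ids[0])
models = [                         # args[2]: self.msg["models"]
    {
        "code": "<model code>",            # compared against ModelType.*.value[1]
        "categories": [{"id": "1"}],       # ids of the classes the model should detect
    },
]
# mod, model_type_code, modelConfig = get_model((config, gpu_id, models))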
