|
|
|
|
|
|
|
|
# -*- coding: utf-8 -*-
import json
import os
import time
import copy
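
# NOTE: besides the imports above, this code relies on helpers defined elsewhere in the project
# (logger, Queue, Common, LogUtils, CommonProcess, cv2tool, ModelUtils, ModelType, AnalysisType,
# AnalysisStatus, ServiceException, ExceptionType); their imports are not shown in this fragment.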
|
|
|
|
|
|
|
|
LogUtils.init_log(self.content)
# Program start time
start_time = time.time()
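
# Load the AI model on a background daemon thread so the rest of the task setup can continue;
# the loaded model is collected later via mod_thread.get_result() inside the analysis loop.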
|
|
mod_thread = Common(None, func=get_model, args=(self.config, str(self.gpu_ids[0]), self.msg["models"]))
mod_thread.setDaemon(True)
mod_thread.start()
mod = None
model_type_code = None
modelConfig = None
# Synchronous alternative, disabled here because the background thread above already loads the model:
# mod, model_type_code, modelConfig = get_model((self.config, str(self.gpu_ids[0]), self.msg["models"]))

# Start the common process (image-upload thread, heartbeat thread, and problem-feedback thread)
commonProcess = CommonProcess(self.fbQueue, None, self.content, self.msg, self.imageQueue,
                              AnalysisType.ONLINE.value)
|
|
|
|
|
|
|
|
pull_start_time = None
read_start_time = None
# Number of model initializations
model = 0
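
# Main analysis loop: track elapsed time, (re)build the OpenCV capture when needed,
# lazily pick up the loaded model from the loader thread, then run inference frame by frame.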
|
|
|
|
|
|
|
|
while True:
    end_time = time.time()
    create_task_time = end_time - start_time

    cv2tool.build_cv2()
    continue

    read_start_time = None
    if mod is None and model == 0:
        model += 1
        logger.info("Initializing model, attempt {}, requestId: {}", model, self.msg.get("request_id"))
        mod, model_type_code, modelConfig = mod_thread.get_result()

    # time00 = time.time()
    # Run the AI model on the current frame
    p_result, timeOut = mod.process(copy.deepcopy(frame), modelConfig)
|
|
|
|
|
|
|
|
LogUtils.init_log(self.content)
# Program start time
start_time = time.time()
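
# Same background model-loading pattern as in the section above: start the loader thread now
# and fetch its result lazily with mod_thread.get_result() once frames are being processed.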
|
|
mod_thread = Common(None, func=get_model, args=(self.config, str(self.gpu_ids[0]), self.msg["models"]))
mod_thread.setDaemon(True)
mod_thread.start()
mod = None
model_type_code = None
modelConfig = None
# Synchronous alternative, disabled here because the background thread above already loads the model:
# mod, model_type_code, modelConfig = get_model((self.config, str(self.gpu_ids[0]), self.msg["models"]))

# Create the heartbeat queue
hbQueue = Queue()
# Start the result-feedback process
|
|
|
|
|
|
|
|
high_score_image = {}
step = int(self.content["service"]["frame_step"])
# Number of model initializations
model = 0

# Total number of video frames
all_f = None
|
|
if cv2tool.cap is not None:
|
|
|
|
|
|
|
|
logger.info("任务开始结束分析, requestId: {}", self.msg.get("request_id")) |
|
|
logger.info("任务开始结束分析, requestId: {}", self.msg.get("request_id")) |
|
|
self.stop_task(cv2tool, aiFilePath, AnalysisStatus.SUCCESS.value) |
|
|
self.stop_task(cv2tool, aiFilePath, AnalysisStatus.SUCCESS.value) |
|
|
break |
|
|
break |
|
|
if mod is None and model == 0:
    model += 1
    logger.info("Initializing model, attempt {}, requestId: {}", model, self.msg.get("request_id"))
    mod, model_type_code, modelConfig = mod_thread.get_result()

# time00 = time.time()
# Run the AI model on the current frame
p_result, timeOut = mod.process(copy.deepcopy(frame), modelConfig)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_model(args):
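    # args is the tuple handed in by the caller:
    #   args[0] - service configuration dict (with per-model sections such as "sz" and "lc")
    #   args[1] - GPU id as a string
    #   args[2] - list of model descriptors from the request message, each with "code" and "categories"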
|
|
|
|
|
logger.info("######################开始加载模型######################") |
|
|
for model in args[2]: |
|
|
for model in args[2]: |
|
|
try: |
|
|
try: |
|
|
code = model.get("code") |
|
|
code = model.get("code") |
|
|
needed_objectsIndex = [int(category.get("id")) for category in model.get("categories")] |
|
|
needed_objectsIndex = [int(category.get("id")) for category in model.get("categories")] |
|
|
logger.info("code:{}, 检查目标:{}, gpuId:{}", code, needed_objectsIndex, args[1]) |
|
|
logger.info("code:{}, 检查目标:{}, gpuId:{}", code, needed_objectsIndex, args[1]) |
|
|
if code == ModelType.WATER_SURFACE_MODEL.value[1]: |
|
|
if code == ModelType.WATER_SURFACE_MODEL.value[1]: |
|
|
return ModelUtils.SZModel(args[1], needed_objectsIndex), code, args[0].get("sz") |
|
|
|
|
|
|
|
|
logger.info("######################加载河道模型######################") |
|
|
|
|
|
mod, model_type_code, modelConfig = ModelUtils.SZModel(args[1], needed_objectsIndex), code, args[0].get("sz") |
|
|
|
|
|
return mod, model_type_code, modelConfig |
|
|
elif code == ModelType.FOREST_FARM_MODEL.value[1]: |
|
|
elif code == ModelType.FOREST_FARM_MODEL.value[1]: |
|
|
|
|
|
logger.info("######################加载林场模型######################") |
|
|
return ModelUtils.LCModel(args[1], needed_objectsIndex), code, args[0].get("lc") |
|
|
return ModelUtils.LCModel(args[1], needed_objectsIndex), code, args[0].get("lc") |
|
|
else: |
|
|
else: |
|
|
logger.error("未匹配到对应的模型") |
|
|
logger.error("未匹配到对应的模型") |
|
|
raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0], |
|
|
raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0], |
|
|
ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1]) |
|
|
ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[1]) |
|
|
except Exception as e: |
|
|
except Exception as e: |
|
|
logger.error("获取模型配置异常:") |
|
|
|
|
|
logger.exception(e) |
|
|
|
|
|
|
|
|
logger.exception("获取模型配置异常: {}", e) |
|
|
raise ServiceException(ExceptionType.AI_MODEL_CONFIG_EXCEPTION.value[0], |
|
|
raise ServiceException(ExceptionType.AI_MODEL_CONFIG_EXCEPTION.value[0], |
|
|
ExceptionType.AI_MODEL_CONFIG_EXCEPTION.value[1]) |
|
|
ExceptionType.AI_MODEL_CONFIG_EXCEPTION.value[1]) |
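

# Illustrative sketch only (an assumption, not the project's actual `Common` implementation):
# judging from the usage above, `Common` behaves like a threading.Thread wrapper that runs
# func(args) on a worker thread and exposes the return value through get_result(). A minimal
# hypothetical equivalent, shown here purely to document that assumed behavior, could be:
import threading


class ResultThread(threading.Thread):
    """Hypothetical stand-in for `Common`: runs func(args) and hands back its return value."""

    def __init__(self, group=None, func=None, args=()):
        super().__init__(group=group)
        self._func = func
        self._args = args
        self._result = None

    def run(self):
        # get_model expects a single tuple argument, so the args tuple is passed through unchanged
        self._result = self._func(self._args)

    def get_result(self):
        # Block until the worker thread has finished, then return whatever func produced
        self.join()
        return self._result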