Commit: "Update" (tags/V2.6.0^2)
Author: chenyukun, 1 year ago
Parent commit: 346b4922dd
6 changed files with 18 additions and 16 deletions
  1. .idea/deployment.xml (+2, -2)
  2. .idea/workspace.xml (+4, -2)
  3. concurrency/IntelligentRecognitionProcess.py (+2, -2)
  4. dsp_application.yml (+2, -2)
  5. service/Dispatcher.py (+2, -2)
  6. util/ModelUtils.py (+6, -6)

.idea/deployment.xml (+2, -2)

@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-<component name="PublishConfigData" autoUpload="Always" serverName="10.21" remoteFilesAllowedToDisappearOnAutoupload="false">
+<component name="PublishConfigData" autoUpload="Always" remoteFilesAllowedToDisappearOnAutoupload="false">
 <serverData>
 <paths name="10.21">
 <serverdata>
@@ -42,7 +42,7 @@
 <paths name="thsw2@192.168.10.66:22">
 <serverdata>
 <mappings>
-<mapping deploy="/home/chenyukun/algSch" local="$PROJECT_DIR$" web="/" />
+<mapping deploy="/home/chenyukun/dev/algSch" local="$PROJECT_DIR$" web="/" />
 </mappings>
 </serverdata>
 </paths>

.idea/workspace.xml (+4, -2)

@@ -5,9 +5,11 @@
 </component>
 <component name="ChangeListManager">
 <list default="true" id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="Changes">
+<change beforePath="$PROJECT_DIR$/.idea/deployment.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/deployment.xml" afterDir="false" />
 <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
 <change beforePath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" afterDir="false" />
-<change beforePath="$PROJECT_DIR$/enums/ModelTypeEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ModelTypeEnum.py" afterDir="false" />
 <change beforePath="$PROJECT_DIR$/dsp_application.yml" beforeDir="false" afterPath="$PROJECT_DIR$/dsp_application.yml" afterDir="false" />
+<change beforePath="$PROJECT_DIR$/service/Dispatcher.py" beforeDir="false" afterPath="$PROJECT_DIR$/service/Dispatcher.py" afterDir="false" />
+<change beforePath="$PROJECT_DIR$/util/ModelUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ModelUtils.py" afterDir="false" />
 </list>
 <option name="SHOW_DIALOG" value="false" />
@@ -384,7 +386,7 @@
 <workItem from="1672273700875" duration="1315000" />
 <workItem from="1672295805200" duration="19000" />
 <workItem from="1672709979593" duration="2445000" />
-<workItem from="1672797232144" duration="20547000" />
+<workItem from="1672797232144" duration="24690000" />
 </task>
 <servers />
 </component>

concurrency/IntelligentRecognitionProcess.py (+2, -2)

@@ -998,7 +998,7 @@ class PhotosIntelligentRecognitionProcess(IntelligentRecognitionProcess):
 progress='0.0000',
 analyse_time=TimeUtils.now_date_to_str())})
 # 加载模型
-mod, model_type_code = get_model((str(self.gpu_ids[0]), self.msg["models"]))
+mod, model_type_code = get_model((str(self.gpu_ids[0]), self.msg["models"], self.msg.get("request_id")))
 # 获取所有图片信息
 imageUrls = self.msg.get("image_urls")
 if model_type_code == ModelType.EPIDEMIC_PREVENTION_MODEL.value[1]:
@@ -1076,7 +1076,7 @@ def get_model(args):
 logger.info("code:{}, 检查目标:{}, gpuId:{}", code, needed_objectsIndex, args[0])
 model_method = model_config.get(code)
 if model_method is not None:
-return model_method(args[0], needed_objectsIndex, logger, args[2])
+return model_method[0](args[0], needed_objectsIndex, logger, args[2]), model_method[1]
 else:
 logger.error("未匹配到对应的模型")
 raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
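Note on the get_model change: taken together, the two hunks suggest model_config now maps each model code to a (constructor, type_code) pair, the caller passes the request id as a third tuple element, and both the model instance and its type code are returned. A minimal, self-contained sketch of that call pattern only (the class name, the "demo" code, the type code 99, and the logger are hypothetical, not the repository's actual entries):

import logging

logger = logging.getLogger("dsp")

class DemoModel:  # stand-in for the model classes in util/ModelUtils.py
    def __init__(self, device, allowed_list, log, request_id):
        self.device, self.allowed_list, self.request_id = device, allowed_list, request_id
        log.info("loading model on gpu %s for request %s", device, request_id)

# hypothetical mapping: model code -> (constructor, type_code)
model_config = {"demo": (DemoModel, 99)}

def get_model(args):
    gpu_id, allowed_list, request_id = args  # request_id is the new third element
    entry = model_config.get("demo")
    if entry is None:
        raise RuntimeError("no matching model")  # the real code raises ServiceException
    constructor, type_code = entry
    # unpack the pair: build the model, hand back its type code alongside it
    return constructor(gpu_id, allowed_list, logger, request_id), type_code

mod, model_type_code = get_model(("0", [1, 2], "req-123"))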

dsp_application.yml (+2, -2)

@@ -92,9 +92,9 @@ gpu:
 # 'random'- 随机订购可用的 GPU 设备 ID
 # 'load'- 按负载递增排序可用的 GPU 设备 ID
 # 'memory'- 通过升序内存使用来排序可用的 GPU 设备 ID
-order: 'memory'
+order: 'first'
 # 获取可用gpu数量
-limit: 1
+limit: 10
 # 最大负载
 maxLoad: 0.8
 # 最大内存
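The changed keys mirror the parameters of GPUtil.getAvailable. Assuming GPUtils.get_gpu_ids is a thin wrapper over the GPUtil package (an assumption; the wrapper itself is not part of this diff), a sketch of how the YAML values map through:

import GPUtil

def get_gpu_ids(cfg):
    gpu_cfg = cfg["gpu"]
    return GPUtil.getAvailable(
        order=gpu_cfg["order"],      # now 'first': device IDs in ascending index order
        limit=gpu_cfg["limit"],      # now 10: return up to ten available devices
        maxLoad=gpu_cfg["maxLoad"],  # 0.8: skip GPUs above 80% load
        # the key under the "maximum memory" comment is cut off above; maxMemory is a guess
        maxMemory=gpu_cfg.get("maxMemory", 0.8),
    )

In GPUtil's terms, 'memory' with limit 1 returned only the single least-used GPU, while 'first' with limit 10 returns every qualifying device in index order.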

service/Dispatcher.py (+2, -2)

@@ -279,7 +279,7 @@ class DispatcherService:
 if 'start' == message.get("command"):
 logger.info("开始实时分析")
 gpu_ids = GPUtils.get_gpu_ids(self.content)
-if gpu_ids is None or len(gpu_ids) == 0:
+if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids):
 feedback = {
 "feedback": message_feedback(message.get("request_id"),
 AnalysisStatus.FAILED.value,
@@ -303,7 +303,7 @@ class DispatcherService:
 if 'start' == message.get("command"):
 logger.info("开始离线分析")
 gpu_ids = GPUtils.get_gpu_ids(self.content)
-if gpu_ids is None or len(gpu_ids) == 0:
+if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids):
 feedback = {
 "feedback": message_feedback(message.get("request_id"),
 AnalysisStatus.FAILED.value,
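The strengthened guard in both branches now rejects the job unless GPU 0 is among the available IDs, whether the IDs come back as ints or as strings. The same condition in isolation (the helper name is illustrative only):

def gpu0_available(gpu_ids):
    if gpu_ids is None or len(gpu_ids) == 0:
        return False
    # GPU 0 may be reported as int(0) or "0" depending on how the IDs were collected
    return 0 in gpu_ids or str(0) in gpu_ids

assert gpu0_available([0, 1])
assert gpu0_available(["0", "1"])
assert not gpu0_available([1, 2])
assert not gpu0_available([])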

util/ModelUtils.py (+6, -6)

@@ -153,7 +153,7 @@ class LCModel:
 'postFile': '../AIlib2/weights/forest/para.json' # 后处理参数文件
 }
 self.device = select_device(par.get('device'))
-self.half = device.type != 'cpu' # half precision only supported on CUDA
+self.half = self.device.type != 'cpu' # half precision only supported on CUDA
 Detweights = par.get('Detweights')
 if self.trtFlag_det:
 log = trt.Logger(trt.Logger.ERROR)
@@ -224,7 +224,7 @@ class VehicleModel:
 'postFile': '../AIlib2/weights/vehicle/para.json' # 后处理参数文件
 }
 self.device = select_device(par.get('device'))
-self.half = device.type != 'cpu' # half precision only supported on CUDA
+self.half = self.device.type != 'cpu' # half precision only supported on CUDA
 Detweights = par.get('Detweights')
 if self.trtFlag_det:
 log = trt.Logger(trt.Logger.ERROR)
@@ -295,7 +295,7 @@ class PedestrianModel:
 'postFile': '../AIlib2/weights/pedestrian/para.json' # 后处理参数文件
 }
 self.device = select_device(par.get('device'))
-self.half = device.type != 'cpu' # half precision only supported on CUDA
+self.half = self.device.type != 'cpu' # half precision only supported on CUDA
 Detweights = par.get('Detweights')
 if self.trtFlag_det:
 log = trt.Logger(trt.Logger.ERROR)
@@ -366,7 +366,7 @@ class SmogfireModel:
 'postFile': '../AIlib2/weights/smogfire/para.json' # 后处理参数文件
 }
 self.device = select_device(par.get('device'))
-self.half = device.type != 'cpu' # half precision only supported on CUDA
+self.half = self.device.type != 'cpu' # half precision only supported on CUDA
 Detweights = par.get('Detweights')
 if self.trtFlag_det:
 log = trt.Logger(trt.Logger.ERROR)
@@ -544,8 +544,8 @@ class PlateMModel:
 logger.info("########################加载车牌模型########################, requestId:{}", requestId)
 self.allowedList = allowedList
 self.img_type = 'plate' ## code,plate
-self.par = {'code': {'weights': '../AIlib/weights/jkm/health_yolov5s_v3.jit', 'img_type': 'code', 'nc': 10},
-'plate': {'weights': '../AIlib/weights/jkm/plate_yolov5s_v3.jit', 'img_type': 'plate', 'nc': 1},
+self.par = {'code': {'weights': '../AIlib2/weights/jkm/health_yolov5s_v3.jit', 'img_type': 'code', 'nc': 10},
+'plate': {'weights': '../AIlib2/weights/jkm/plate_yolov5s_v3.jit', 'img_type': 'plate', 'nc': 1},
 'conf_thres': 0.4,
 'iou_thres': 0.45,
 #'device': 'cuda:%s' % device,
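Four of these hunks are the same one-line fix: the device handle is stored on self.device, so the half-precision flag has to read self.device.type; the bare name device is, at that point, either undefined or the raw constructor argument rather than a torch.device, so the old line could not evaluate correctly. The last hunk only repoints the PlateMModel weight paths from ../AIlib to ../AIlib2. A minimal reproduction of the corrected pattern, with select_device replaced by a stand-in for the YOLOv5-style helper used in the repo:

import torch

def select_device(requested=""):
    # stand-in: use CUDA when requested and available, otherwise fall back to CPU
    return torch.device("cuda:0" if requested and torch.cuda.is_available() else "cpu")

class DemoDetector:
    def __init__(self, requested_device=""):
        self.device = select_device(requested_device)
        # half precision only supported on CUDA
        self.half = self.device.type != "cpu"

d = DemoDetector()          # no GPU requested -> CPU
print(d.device, d.half)     # cpu False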
