選択できるのは25トピックまでです。 トピックは、先頭が英数字で、英数字とダッシュ('-')を使用した35文字以内のものにしてください。

1040 行
58KB

  1. import sys, yaml
  2. from easydict import EasyDict as edict
  3. from concurrent.futures import ThreadPoolExecutor
  4. sys.path.extend(['..','../AIlib2' ])
  5. from AI import AI_process,AI_process_forest,get_postProcess_para,get_postProcess_para_dic,ocr_process,AI_det_track,AI_det_track_batch
  6. from stdc import stdcModel
  7. import cv2,os,time
  8. from segutils.segmodel import SegModel
  9. from segutils.trafficUtils import tracfficAccidentMixFunction
  10. from models.experimental import attempt_load
  11. from utils.torch_utils import select_device
  12. from utilsK.queRiver import get_labelnames,get_label_arrays,save_problem_images,riverDetSegMixProcess,draw_painting_joint
  13. from utilsK.drownUtils import mixDrowing_water_postprocess
  14. from ocrUtils.ocrUtils import CTCLabelConverter,AlignCollate
  15. from trackUtils.sort import Sort,track_draw_boxAndTrace,track_draw_trace_boxes,moving_average_wang,drawBoxTraceSimplied
  16. from obbUtils.load_obb_model import load_model_decoder_OBB
  17. from obbUtils.shipUtils import OBB_infer,draw_obb
  18. import numpy as np
  19. import torch,glob
  20. import tensorrt as trt
  21. from utilsK.masterUtils import get_needed_objectsIndex
  22. from utilsK.noParkingUtils import mixNoParking_road_postprocess
  23. from copy import deepcopy
  24. from scipy import interpolate
  25. #import warnings
  26. #warnings.filterwarnings("error")
  27. def view_bar(num, total,time1,prefix='prefix'):
  28. rate = num / total
  29. time_n=time.time()
  30. rate_num = int(rate * 30)
  31. rate_nums = np.round(rate * 100)
  32. r = '\r %s %d / %d [%s%s] %.2f s'%(prefix,num,total, ">" * rate_num, " " * (30 - rate_num), time_n-time1 )
  33. sys.stdout.write(r)
  34. sys.stdout.flush()
# NOTE(review): stray module-level string literal (Chinese for "multithreading").
# It is a no-op expression statement, presumably left as a section marker for
# the thread-pool-based processing code below.
'''
多线程
'''
  38. def drawAllBox(preds,imgDraw,label_arraylist,rainbows,font):
  39. for box in preds:
  40. #cls,conf,xyxy = box[0],box[5], box[1:5]
  41. #print('#'*20,'line47',box)
  42. cls,conf,xyxy = box[5],box[4], box[0:4] ##2023.08.03,修改了格式
  43. #print('#####line46 demo.py:', cls,conf,xyxy, len(label_arraylist),len(rainbows) )
  44. imgDraw = draw_painting_joint(xyxy,imgDraw,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font,socre_location="leftBottom")
  45. return imgDraw
def process_v1(frame):
    """Run the detection(+segmentation) pipeline on one packed work item.

    ``frame`` is a 12-tuple assembled in detSeg_demo:
      (im0s, model, segmodel, names, label_arraylist, rainbows, objectPar,
       digitFont, image_basename, segPar, mode, postPar)
    Writes the annotated image, optionally a ``*_mask.png`` and always a
    ``*.txt`` with one comma-joined box per line, all under
    'images/results/'. Returns the string 'success'.
    """
    #try:
    time00 = time.time()
    H,W,C = frame[0][0].shape
    #frmess---- (im0s,model,segmodel,names,label_arraylist,rainbows,objectPar,digitFont,os.path.basename(imgpath),segPar,mode,postPar)
    #p_result[1] = draw_painting_joint(xyxy,p_result[1],label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font,socre_location="leftBottom")
    p_result,timeOut = AI_process(frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],objectPar=frame[6],font=frame[7],segPar=frame[9],mode=frame[10],postPar=frame[11])
    #print('##'*20,'line64:',p_result[2])
    # Re-draw all boxes locally on the returned image (p_result[2] holds the
    # detections, p_result[1] the image).
    p_result[1] = drawAllBox(p_result[2],p_result[1],frame[4],frame[5],frame[7])
    time11 = time.time()
    image_array = p_result[1]
    cv2.imwrite(os.path.join('images/results/',frame[8] ) ,image_array)
    bname = frame[8].split('.')[0]
    if frame[2]:  # a segmentation model was supplied
        if len(p_result)==5:
            # NOTE(review): p_result[4] appears to be the segmentation mask;
            # saved only when it is a non-empty ndarray — confirm against AI_process.
            image_mask = p_result[4]
            if isinstance(image_mask,np.ndarray) and image_mask.shape[0]>0:
                cv2.imwrite(os.path.join('images/results/',bname+'_mask.png' ) , (image_mask).astype(np.uint8))
    # Dump one comma-separated line per detection box.
    boxes=p_result[2]
    with open( os.path.join('images/results/',bname+'.txt' ),'w' ) as fp:
        for box in boxes:
            box_str=[str(x) for x in box]
            out_str=','.join(box_str)+'\n'
            fp.write(out_str)
    time22 = time.time()
    print('%s,%d*%d,AI-process: %.1f,image save:%.1f , %s'%(frame[8],H,W, (time11 - time00) * 1000.0, (time22-time11)*1000.0,timeOut))
    return 'success'
    #except Exception as e:
    #    return 'failed:'+str(e)
def process_video(video,par0,mode='detSeg'):
    """Run the pipeline frame-by-frame over a video file or stream URL.

    Args:
        video: path/URL accepted by ``cv2.VideoCapture``.
        par0: dict of models and drawing parameters built by detSeg_demo
              (keys: model, segmodel, names, label_arraylist, rainbows,
              objectPar, digitFont, segPar, outpth, ...).
        mode: 'detSeg' uses AI_process, anything else uses AI_process_forest.

    Writes an annotated .mp4 next to ``par0['outpth']`` and saves problem
    images periodically. Returns False when the video cannot be opened,
    otherwise None.
    """
    cap=cv2.VideoCapture(video)
    if not cap.isOpened():
        print('#####error url:',video)
        return False
    bname=os.path.basename(video).split('.')[0]
    fps = int(cap.get(cv2.CAP_PROP_FPS)+0.5)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH )+0.5)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)+0.5)
    framecnt=int(cap.get(7)+0.5)  # property 7 == CAP_PROP_FRAME_COUNT
    save_path_AI = os.path.join(par0['outpth'],os.path.basename(video))
    problem_image_dir= os.path.join( par0['outpth'], 'probleImages' )
    os.makedirs(problem_image_dir,exist_ok=True)
    vid_writer_AI = cv2.VideoWriter(save_path_AI, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width,height))
    num=0
    iframe=0;post_results=[];fpsample=30*10
    imgarray_list = []; iframe_list = []
    #patch_cnt = par0['trackPar']['patchCnt']
    ## windowsize: smoothing window applied to the per-frame interpolated
    ## results; tracking runs once every det_cnt frames.
    trackPar={'det_cnt':10,'windowsize':29 }
    ## track_det_result_update = np.empty((0,8))  # results of each 100-frame batch;
    ## only tracker ids present in the current 100 frames would be kept.
    while cap.isOpened():
        ret, imgarray = cap.read()  # read the next frame
        iframe +=1
        if not ret:break
        if mode=='detSeg':
            p_result,timeOut = AI_process([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],objectPar=par0['objectPar'],font=par0['digitFont'],segPar=par0['segPar'])
        else:
            p_result,timeOut = AI_process_forest([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],par0['half'],par0['device'],par0['conf_thres'], par0['iou_thres'],par0['allowedList'],font=par0['digitFont'],trtFlag_det=par0['trtFlag_det'])
        p_result[1] = drawAllBox(p_result[2],p_result[1],par0['label_arraylist'],par0['rainbows'],par0['digitFont'])
        if mode != 'track':
            image_array = p_result[1];num+=1
            ret = vid_writer_AI.write(image_array)
            view_bar(num, framecnt,time.time(),prefix=os.path.basename(video))
            ## Every fpsample frames: if any frames had detections, archive
            ## them as "problem images" and reset the accumulator.
            if (iframe % fpsample == 0) and (len(post_results)>0) :
                parImage=save_problem_images(post_results,iframe,par0['names'],streamName=bname,outImaDir=problem_image_dir,imageTxtFile=False)
                post_results=[]
            if len(p_result[2] )>0:
                post_results.append(p_result)
    vid_writer_AI.release();
def detSeg_demo(opt):
    """Build the per-business parameter dict, load models, and run the demo.

    Args:
        opt: dict with at least 'business' (selects one of the hard-coded
             configurations below) and 'gpu' (GPU name used in weight paths).

    Loads the detection model (TensorRT .engine or torch .pt) and, when
    configured, a segmentation model, then processes every image and video
    found under the configured test directory via process_v1/process_video.
    NOTE(review): if opt['business'] matches none of the branches below,
    ``par`` is unbound and this function raises NameError.
    """
    if opt['business'] == 'river':
        par={
            'device':'0', ### GPU id; TRT models only support '0' (single GPU)
            'labelnames':"../AIlib2/weights/conf/river/labelnames.json", ### detection class lookup table
            'max_workers':1, ### number of parallel worker threads
            'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### detection model path
            #'Detweights':"/mnt/thsw2/DSP2/weights/river/yolov5.pt",### detection model path
            'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],### which detection classes to display/output
            'seg_nclass':2,### number of segmentation classes (default 2)
            'segRegionCnt':1,### number of contour regions kept from the segmentation result
            'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,# segmentation preprocessing parameters
                'mixFunction':{'function':riverDetSegMixProcess,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}} # function fusing segmentation and detection results
            },
            'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### segmentation weights path
            'postFile': '../AIlib2/weights/conf/river/para.json',### post-processing parameter file
            'txtFontSize':40,### label text size
            'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':3},### box/line/digit display settings
            'testImgPath':'/mnt/thsw2/DSP2/videos/river/',
            'testOutPath':'images/results/',### output directory for test images
        }
    if opt['business'] == 'river2':
        par={
            'device':'0', ### GPU id; TRT models only support '0' (single GPU)
            'labelnames':"../AIlib2/weights/conf/river2/labelnames.json", ### detection class lookup table
            'max_workers':1, ### number of parallel worker threads
            'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### detection model path
            'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],### which detection classes to display/output
            'seg_nclass':2,### number of segmentation classes (default 2)
            'segRegionCnt':1,### number of contour regions kept from the segmentation result
            'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,# segmentation preprocessing parameters
                'mixFunction':{'function':riverDetSegMixProcess,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}} # function fusing segmentation and detection results
            },
            'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### segmentation weights path
            'postFile': '../AIlib2/weights/conf/river2/para.json',### post-processing parameter file
            'txtFontSize':40,### label text size
            'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':3},### box/line/digit display settings
            'testImgPath':'images/river2/',
            'testOutPath':'images/results/',### output directory for test images
        }
    if opt['business'] == 'highWay2':
        par={
            'device':'0', ### GPU id; TRT models only support '0' (single GPU)
            'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ### detection class lookup table
            'max_workers':1, ### number of parallel worker threads
            'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### detection model path
            'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### which detection classes to display/output
            'seg_nclass':3,### number of segmentation classes (default 2)
            'segRegionCnt':2,### number of contour regions kept from the segmentation result
            'segPar':{'modelSize':(1920,1080),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,### segmentation preprocessing parameters
                'mixFunction':{'function':tracfficAccidentMixFunction,
                    'pars':{ 'RoadArea': 16000, 'roadVehicleAngle': 15, 'speedRoadVehicleAngleMax': 75, 'roundness': 1.0, 'cls': 9, 'vehicleFactor': 0.1, 'confThres':0.25,'roadIou':0.6,'radius': 50 ,'vehicleFlag':False,'distanceFlag': False}
                }
            },
            #'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### segmentation weights path
            'Segweights' : "../weights/%s/AIlib2/%s/stdc_1080X1920_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### segmentation weights path
            #'Segweights' :'/mnt/thsw2/DSP2/weights/highWay2/stdc_360X640.pth',
            'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),### post-processing parameter file
            'txtFontSize':20,### label text size
            'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},### box/line display settings
            'testImgPath':'images/highWayTest/',### test image directory
            #'testImgPath':'images/tt',
            'testOutPath':'images/results/',### output directory for test images
        }
        # Propagate the segmentation input size into the mix-function parameters.
        par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
    if opt['business'] == 'drowning':
        par={
            'device':'0', ### GPU id; TRT models only support '0' (single GPU)
            'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ### detection class lookup table
            'max_workers':1, ### number of parallel worker threads
            'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### detection model path
            #'Detweights':"/mnt/thsw2/DSP2/weights/drowning/yolov5.pt",
            'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### which detection classes to display/output
            'seg_nclass':2,### number of segmentation classes (default 2)
            'segRegionCnt':2,### number of contour regions kept from the segmentation result
            'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,### segmentation preprocessing parameters
                'mixFunction':{'function':mixDrowing_water_postprocess,
                    'pars':{ }
                }
            },
            'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### segmentation weights path
            #'Segweights' : "/mnt/thsw2/DSP2/weights/drowning/stdc_360X640_2080Ti_fp16.engine",
            'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),### post-processing parameter file
            'txtFontSize':20,### label text size
            'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},### box/line display settings
            #'testImgPath':'/mnt/thsw2/DSP2/videos/drowning/',### test image directory
            'testImgPath':'images/drowning/',
            'testOutPath':'images/results/',### output directory for test images
        }
        # Propagate the segmentation input size into the mix-function parameters.
        par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
    if opt['business'] == 'noParking':
        par={
            'device':'0', ### GPU id; TRT models only support '0' (single GPU)
            'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ### detection class lookup table
            'max_workers':1, ### number of parallel worker threads
            'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### detection model path
            'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### which detection classes to display/output
            'seg_nclass':4,### number of segmentation classes (default 2)
            'segRegionCnt':2,### number of contour regions kept from the segmentation result
            'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,### segmentation preprocessing parameters
                'mixFunction':{'function':mixNoParking_road_postprocess,
                    'pars':{ 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5 ,'RoadArea': 16000,'fitOrder':2}
                }
            },
            'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### segmentation weights path
            'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),### post-processing parameter file
            'txtFontSize':20,### label text size
            'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},### box/line display settings
            'testImgPath':'images/noParking/',### test image directory
            'testOutPath':'images/results/',### output directory for test images
        }
        # Propagate the segmentation input size into the mix-function parameters.
        par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
    if opt['business'] == 'illParking':
        from utilsK.illParkingUtils import illParking_postprocess
        par={
            'device':'0', ### GPU id; TRT models only support '0' (single GPU)
            'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ### detection class lookup table
            'max_workers':1, ### number of parallel worker threads
            'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### detection model path
            'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### which detection classes to display/output
            'seg_nclass':4,### number of segmentation classes (default 2)
            'segRegionCnt':2,### number of contour regions kept from the segmentation result
            'segPar':{
                'mixFunction':{'function':illParking_postprocess,
                    'pars':{ }
                }
            },
            'Segweights' : None,### segmentation weights path (none for this business)
            'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),### post-processing parameter file
            'txtFontSize':20,### label text size
            'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},### box/line display settings
            'testImgPath':'images/cityMangement',### test image directory
            'testOutPath':'images/results/',### output directory for test images
        }
    if opt['business'] == 'cityMangement2':
        from DMPR import DMPRModel
        from DMPRUtils.jointUtil import dmpr_yolo
        par={
            'device':'0', ### GPU id; TRT models only support '0' (single GPU)
            'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ### detection class lookup table
            'max_workers':1, ### number of parallel worker threads
            'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### detection model path
            #'Detweights':"../AIlib2/weights/conf/cityMangement2/yolov5.pt",### detection model path
            #'Detweights':"/mnt/thsw2/DSP2/weights/cityMangement2/weights/urbanManagement/yolo/best.pt",
            'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],### which detection classes to display/output
            'seg_nclass':4,### number of segmentation classes (default 2)
            'segRegionCnt':2,### number of contour regions kept from the segmentation result
            'segPar':{ 'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.3, 'dmprimg_size':640,
                'mixFunction':{'function':dmpr_yolo,
                    'pars':{'carCls':0 ,'illCls':3,'scaleRatio':0.5,'border':80}
                }
            },
            #'Segweights' : '/mnt/thsw2/DSP2/weights/cityMangement2/weights/urbanManagement/DMPR/dp_detector_499.engine',### segmentation weights path
            'Segweights':"../weights/%s/AIlib2/%s/dmpr_%s.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),### DMPR model path
            #'Segweights':"../AIlib2/weights/conf/cityMangement2/dmpr.pth",### DMPR model path
            'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),### post-processing parameter file
            'txtFontSize':20,### label text size
            'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},### box/line display settings
            #'testImgPath':'/mnt/thsw2/DSP2/demoImages/illParking',### test image directory
            #'testImgPath':'/mnt/thsw2/DSP2/weights/cityMangement2_0916/images/input',
            'testImgPath':'images/cityMangement2/',
            'testOutPath':'images/results/',### output directory for test images
        }
    # TRT vs torch is decided purely by the weight-file extension.
    if par['Segweights']:
        par['trtFlag_seg']=True if par['Segweights'].endswith('.engine') else False
    else:
        par['trtFlag_seg']=False
    par['trtFlag_det']=True if par['Detweights'].endswith('.engine') else False
    mode = par['mode'] if 'mode' in par.keys() else 'others'
    postPar = par['postPar'] if 'postPar' in par.keys() else None
    device_=par['device']
    labelnames = par['labelnames'] ## class lookup table
    max_workers=par['max_workers'];
    trtFlag_det=par['trtFlag_det'];trtFlag_seg=par['trtFlag_seg'];segRegionCnt=par['segRegionCnt']
    device = select_device(device_)
    names=get_labelnames(labelnames)
    half = device.type != 'cpu' # half precision only supported on CUDA
    if trtFlag_det:
        Detweights = par['Detweights']## serialized TensorRT detection engine
        logger = trt.Logger(trt.Logger.ERROR)
        with open(Detweights, "rb") as f, trt.Runtime(logger) as runtime:
            model=runtime.deserialize_cuda_engine(f.read())# read the local .engine file; returns an ICudaEngine
        print('############locad det model trtsuccess:',Detweights)
    else:
        Detweights = par['Detweights']
        model = attempt_load(Detweights, map_location=device) # load FP32 model
        print('############locad det model pth success:',Detweights)
        if half: model.half()
    par['segPar']['seg_nclass'] = par['seg_nclass']
    segPar=par['segPar']
    if par['Segweights']:
        # cityMangement2 uses the DMPR parking-slot model; all others use STDC.
        if opt['business'] == 'cityMangement2':
            segmodel = DMPRModel(weights=par['Segweights'], par = par['segPar'])
        else:
            segmodel = stdcModel(weights=par['Segweights'], par = par['segPar'])
    else:
        segmodel= None
        print('############None seg model is loaded###########:' )
    postFile= par['postFile']
    digitFont= par['digitFont']
    #conf_thres,iou_thres,classes,rainbows=get_postProcess_para(postFile)
    detPostPar = get_postProcess_para_dic(postFile)
    conf_thres,iou_thres,classes,rainbows = detPostPar["conf_thres"],detPostPar["iou_thres"],detPostPar["classes"],detPostPar["rainbows"]
    # Optional post-processing knobs; absent keys fall back to None.
    if 'ovlap_thres_crossCategory' in detPostPar.keys(): ovlap_thres_crossCategory=detPostPar['ovlap_thres_crossCategory']
    else:ovlap_thres_crossCategory = None
    if 'score_byClass' in detPostPar.keys(): score_byClass=detPostPar['score_byClass']
    else: score_byClass = None
    #### model selection parameters:
    mode_paras=par['detModelpara']
    allowedList,allowedList_string=get_needed_objectsIndex(mode_paras)
    #allowedList=[0,1,2,3]
    ## prepare the pre-rendered label images used when drawing
    label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
    ## image test
    #impth = 'images/slope/'
    impth = par['testImgPath']
    outpth = par['testOutPath']
    imgpaths=[]### all images in the test directory
    for postfix in ['.jpg','.JPG','.PNG','.png']:
        imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
    videopaths=[]### all videos in the test directory
    for postfix in ['.MP4','.mp4','.avi']:
        videopaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
    ### process images first
    frames=[]
    for imgpath in imgpaths:
        im0s=[cv2.imread(imgpath)]
        objectPar={ 'half':half,'device':device,'conf_thres':conf_thres,'ovlap_thres_crossCategory':ovlap_thres_crossCategory,'iou_thres':iou_thres,'allowedList':allowedList,'segRegionCnt':segRegionCnt, 'trtFlag_det':trtFlag_det,'trtFlag_seg':trtFlag_seg ,'score_byClass':score_byClass}
        #p_result[1] = draw_painting_joint(xyxy,p_result[1],label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font,socre_location="leftBottom")
        frame=(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar,digitFont,os.path.basename(imgpath),segPar,mode,postPar)
        frames.append(frame)
    t1=time.time()
    if max_workers==1:
        # Serial path: process one frame tuple at a time.
        for i in range(len(imgpaths)):
            print('-'*20,imgpaths[i],'-'*20)
            t5=time.time()
            process_v1(frames[i])
            t6=time.time()
            #print('#######%s, ms:%.1f , accumetate time:%.1f, avage:%1.f '%(os.path.basename(imgpaths[i]), (t6-t5)*1000.0,(t6-t1)*1000.0, (t6-t1)*1000.0/(i+1)))
    else:
        # Parallel path: fan the frame tuples out over a thread pool.
        with ThreadPoolExecutor(max_workers=max_workers) as t:
            for result in t.map(process_v1, frames):
                #print(result)
                t=result
    t2=time.time()
    if len(imgpaths)>0:
        print('All %d images time:%.1f ms ,each:%.1f ms, with %d threads'%(len(imgpaths),(t2-t1)*1000, (t2-t1)*1000.0/len(imgpaths) , max_workers) )
    objectPar={ 'half':half,'device':device,'conf_thres':conf_thres,'iou_thres':iou_thres,'allowedList':allowedList,'segRegionCnt':segRegionCnt, 'trtFlag_det':trtFlag_det,'trtFlag_seg':trtFlag_seg }
    par0={ 'model':model,'segmodel':segmodel,'names':names,'label_arraylist':label_arraylist,'rainbows':rainbows,
        'objectPar':objectPar,'digitFont':digitFont,'segPar':segPar,'outpth':outpth
    }
    ### then process video files, if any
    for video in videopaths:
        process_video(video,par0)
        print(' ')
  371. def det_demo(business ):
  372. ####森林巡检的参数
  373. if opt['business'] == 'forest':
  374. par={
  375. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  376. 'labelnames':"../AIlib2/weights/conf/forest/labelnames.json", ###检测类别对照表
  377. 'gpuname':'3090',###显卡名称
  378. 'max_workers':1, ###并行线程数
  379. 'trtFlag_det':True,###检测模型是否采用TRT
  380. 'trtFlag_seg':False,###分割模型是否采用TRT
  381. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  382. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  383. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  384. 'seg_nclass':2,###分割模型类别数目,默认2类
  385. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  386. 'segPar':None,###分割模型预处理参数
  387. 'Segweights' : None,###分割模型权重位置
  388. 'postFile': '../AIlib2/weights/conf/forest/para.json',###后处理参数文件
  389. 'txtFontSize':80,###文本字符的大小
  390. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  391. 'testImgPath':'../AIdemo2/images/forest/',###测试图像的位置
  392. 'testOutPath':'images/results/',###输出测试图像位置
  393. }
  394. #
  395. if opt['business'] == 'forest2':
  396. par={
  397. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  398. 'labelnames':"../AIlib2/weights/conf/forest2/labelnames.json", ###检测类别对照表
  399. 'gpuname':opt['gpu'],###显卡名称
  400. 'max_workers':1, ###并行线程数
  401. 'trtFlag_det':True,###检测模型是否采用TRT
  402. 'trtFlag_seg':False,###分割模型是否采用TRT
  403. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  404. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  405. #'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],
  406. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  407. 'seg_nclass':2,###分割模型类别数目,默认2类
  408. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  409. 'segPar':None,###分割模型预处理参数
  410. 'Segweights' : None,###分割模型权重位置
  411. 'postFile': '../AIlib2/weights/conf/forest/para.json',###后处理参数文件
  412. 'txtFontSize':80,###文本字符的大小
  413. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  414. 'testImgPath':'../AIdemo2/images/forest2/',###测试图像的位置
  415. 'testOutPath':'images/results/',###输出测试图像位置
  416. }
  417. ###车辆巡检参数
  418. if opt['business'] == 'vehicle':
  419. par={
  420. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  421. 'labelnames':"../AIlib2/weights/conf/vehicle/labelnames.json", ###检测类别对照表
  422. 'gpuname':'2080T',###显卡名称
  423. 'max_workers':1, ###并行线程数
  424. 'trtFlag_det':True,###检测模型是否采用TRT
  425. 'trtFlag_seg':False,###分割模型是否采用TRT
  426. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  427. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  428. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  429. 'seg_nclass':2,###分割模型类别数目,默认2类
  430. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  431. 'segPar':None,###分割模型预处理参数
  432. 'Segweights' : None,###分割模型权重位置
  433. 'postFile': '../AIlib2/weights/conf/vehicle/para.json',###后处理参数文件
  434. 'txtFontSize':40,###文本字符的大小
  435. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  436. 'testImgPath':'../AIdemo2/images/vehicle/',###测试图像的位置
  437. 'testOutPath':'images/results/',###输出测试图像位置
  438. }
  439. ###行人检测模型
  440. if opt['business'] == 'pedestrian':
  441. par={
  442. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  443. 'labelnames':"../AIlib2/weights/conf/pedestrian/labelnames.json", ###检测类别对照表
  444. 'gpuname':'2080T',###显卡名称
  445. 'max_workers':1, ###并行线程数
  446. 'trtFlag_det':True,###检测模型是否采用TRT
  447. 'trtFlag_seg':False,###分割模型是否采用TRT
  448. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  449. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  450. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  451. 'seg_nclass':2,###分割模型类别数目,默认2类
  452. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  453. 'segPar':None,###分割模型预处理参数
  454. 'Segweights' : None,###分割模型权重位置
  455. 'postFile': '../AIlib2/weights/conf/pedestrian/para.json',###后处理参数文件
  456. 'txtFontSize':40,###文本字符的大小
  457. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  458. 'testImgPath':'../AIdemo2/images/pedestrian/',###测试图像的位置
  459. 'testOutPath':'images/results/',###输出测试图像位置
  460. }
  461. ###烟雾火焰检测模型
  462. if opt['business'] == 'smogfire':
  463. par={
  464. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  465. 'labelnames':"../AIlib2/weights/conf/smogfire/labelnames.json", ###检测类别对照表
  466. 'gpuname':'2080T',###显卡名称
  467. 'max_workers':1, ###并行线程数
  468. 'trtFlag_det':True,###检测模型是否采用TRT
  469. 'trtFlag_seg':False,###分割模型是否采用TRT
  470. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  471. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  472. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  473. 'seg_nclass':2,###没有分割模型,此处不用
  474. 'segRegionCnt':0,###没有分割模型,此处不用
  475. 'segPar':None,###分割模型预处理参数
  476. 'Segweights' : None,###分割模型权重位置
  477. 'postFile': '../AIlib2/weights/conf/smogfire/para.json',###后处理参数文件
  478. 'txtFontSize':40,###文本字符的大小
  479. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  480. 'testImgPath':'../AIdemo2/images/smogfire/',###测试图像的位置
  481. 'testOutPath':'images/results/',###输出测试图像位置
  482. }
  483. ###钓鱼游泳检测
  484. if opt['business'] == 'AnglerSwimmer':
  485. par={
  486. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  487. 'labelnames':"../AIlib2/weights/conf/AnglerSwimmer/labelnames.json", ###检测类别对照表
  488. 'gpuname':'2080T',###显卡名称
  489. 'max_workers':1, ###并行线程数
  490. 'trtFlag_det':True,###检测模型是否采用TRT
  491. 'trtFlag_seg':False,###分割模型是否采用TRT
  492. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  493. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  494. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  495. 'seg_nclass':2,###没有分割模型,此处不用
  496. 'segRegionCnt':0,###没有分割模型,此处不用
  497. 'segPar':None,###分割模型预处理参数
  498. 'Segweights' : None,###分割模型权重位置
  499. 'postFile': '../AIlib2/weights/conf/AnglerSwimmer/para.json',###后处理参数文件
  500. 'txtFontSize':40,###文本字符的大小
  501. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  502. 'testImgPath':'../AIdemo2/images/AnglerSwimmer/',###测试图像的位置
  503. 'testOutPath':'images/results/',###输出测试图像位置
  504. }
  505. ###航道应急,做落水人员检测, channelEmergency
  506. if opt['business'] == 'channelEmergency':
  507. par={
  508. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  509. 'labelnames':"../AIlib2/weights/conf/channelEmergency/labelnames.json", ###检测类别对照表
  510. 'gpuname':'2080T',###显卡名称
  511. 'max_workers':1, ###并行线程数
  512. 'trtFlag_det':True,###检测模型是否采用TRT
  513. 'trtFlag_seg':False,###分割模型是否采用TRT
  514. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  515. #'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  516. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],###控制哪些检测类别显示、输出
  517. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  518. 'seg_nclass':2,###没有分割模型,此处不用
  519. 'segRegionCnt':0,###没有分割模型,此处不用
  520. 'segPar':None,###分割模型预处理参数
  521. 'Segweights' : None,###分割模型权重位置
  522. 'postFile': '../AIlib2/weights/conf/channelEmergency/para.json',###后处理参数文件
  523. 'txtFontSize':40,###文本字符的大小
  524. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  525. 'testImgPath':'../AIdemo2/images/channelEmergency/',###测试图像的位置
  526. 'testOutPath':'images/results/',###输出测试图像位置
  527. }
  528. ###乡村路违法种植
  529. if opt['business'] == 'countryRoad':
  530. par={
  531. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  532. 'labelnames':"../AIlib2/weights/conf/countryRoad/labelnames.json", ###检测类别对照表
  533. 'gpuname':'2080T',###显卡名称
  534. 'max_workers':1, ###并行线程数
  535. 'trtFlag_det':True,###检测模型是否采用TRT
  536. 'trtFlag_seg':False,###分割模型是否采用TRT
  537. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  538. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  539. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  540. 'seg_nclass':2,###没有分割模型,此处不用
  541. 'segRegionCnt':0,###没有分割模型,此处不用
  542. 'segPar':None,###分割模型预处理参数
  543. 'Segweights' : None,###分割模型权重位置
  544. 'postFile': '../AIlib2/weights/conf/countryRoad/para.json',###后处理参数文件
  545. 'txtFontSize':40,###文本字符的大小
  546. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  547. 'testImgPath':'../AIdemo2/images/countryRoad/',###测试图像的位置
  548. 'testOutPath':'images/results/',###输出测试图像位置
  549. }
  550. ###河道上大型船只
  551. if opt['business'] == 'ship':
  552. par={
  553. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  554. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  555. 'gpuname':'2080T',###显卡名称
  556. 'max_workers':1, ###并行线程数
  557. 'trtFlag_det':True,###检测模型是否采用TRT
  558. 'trtFlag_seg':False,###分割模型是否采用TRT
  559. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  560. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  561. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  562. 'seg_nclass':2,###没有分割模型,此处不用
  563. 'segRegionCnt':0,###没有分割模型,此处不用
  564. 'segPar':None,###分割模型预处理参数
  565. 'Segweights' : None,###分割模型权重位置
  566. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business']),###后处理参数文件
  567. 'txtFontSize':40,###文本字符的大小
  568. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  569. 'testImgPath':'../../../data/XunHe/shipData/',###测试图像的位置
  570. 'testOutPath':'images/results/',###输出测试图像位置
  571. }
  572. ###城管项目,检测城市垃圾和车辆
  573. if opt['business'] == 'cityMangement':
  574. par={
  575. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  576. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  577. 'gpuname':'2080Ti',###显卡名称
  578. 'max_workers':1, ###并行线程数
  579. 'trtFlag_det':True,###检测模型是否采用TRT
  580. 'trtFlag_seg':False,###分割模型是否采用TRT
  581. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  582. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  583. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  584. 'seg_nclass':2,###没有分割模型,此处不用
  585. 'segRegionCnt':0,###没有分割模型,此处不用
  586. 'segPar':None,###分割模型预处理参数
  587. 'Segweights' : None,###分割模型权重位置
  588. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business']),###后处理参数文件
  589. 'txtFontSize':40,###文本字符的大小
  590. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  591. 'testImgPath':'images/tmp',###测试图像的位置
  592. 'testOutPath':'images/results/',###输出测试图像位置
  593. }
  594. ###城管项目,检测道路情况,输入类别为五个:"护栏","交通标志","非交通标志","施工","施工“(第4,第5类别合并,名称相同)
  595. ###实际模型检测输出的类别为:"护栏","交通标志","非交通标志","锥桶","水马"
  596. if opt['business'] == 'cityRoad':
  597. par={
  598. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  599. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  600. 'gpuname':'2080Ti',###显卡名称
  601. 'max_workers':1, ###并行线程数
  602. 'trtFlag_det':True,###检测模型是否采用TRT
  603. 'trtFlag_seg':False,###分割模型是否采用TRT
  604. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  605. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  606. 'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
  607. 'seg_nclass':2,###没有分割模型,此处不用
  608. 'segRegionCnt':0,###没有分割模型,此处不用
  609. 'segPar':None,###分割模型预处理参数
  610. 'Segweights' : None,###分割模型权重位置
  611. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business']),###后处理参数文件
  612. 'txtFontSize':40,###文本字符的大小
  613. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  614. 'testImgPath':'images/%s'%(opt['business'] ),###测试图像的位置
  615. 'testOutPath':'images/results/',###输出测试图像位置
  616. }
  617. #segRegionCnt=par['segRegionCnt']
  618. trtFlag_seg = par['trtFlag_seg'];segPar=par['segPar']
  619. ##使用森林,道路模型,business 控制['forest','road']
  620. ##预先设置的参数
  621. gpuname=par['gpuname']#如果用trt就需要此参数,只能是"3090" "2080Ti"
  622. device_=par['device'] ##选定模型,可选 cpu,'0','1'
  623. device = select_device(device_)
  624. half = device.type != 'cpu' # half precision only supported on CUDA
  625. trtFlag_det=par['trtFlag_det'] ###是否采用TRT模型加速
  626. ##以下参数目前不可改
  627. imageW=1536 ####道路模型
  628. digitFont= par['digitFont']
  629. if trtFlag_det:
  630. Detweights=par['Detweights']
  631. logger = trt.Logger(trt.Logger.ERROR)
  632. with open(Detweights, "rb") as f, trt.Runtime(logger) as runtime:
  633. model=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象
  634. print('####load TRT model :%s'%(Detweights))
  635. else:
  636. Detweights=par['Detweights']
  637. model = attempt_load(Detweights, map_location=device) # load FP32 model
  638. if half: model.half()
  639. labelnames = par['labelnames']
  640. postFile= par['postFile']
  641. print( Detweights,labelnames )
  642. #conf_thres,iou_thres,classes,rainbows=get_postProcess_para(postFile)
  643. detPostPar = get_postProcess_para_dic(postFile)
  644. conf_thres,iou_thres,classes,rainbows = detPostPar["conf_thres"],detPostPar["iou_thres"],detPostPar["classes"],detPostPar["rainbows"]
  645. if 'ovlap_thres_crossCategory' in detPostPar.keys(): ovlap_thres_crossCategory=detPostPar['ovlap_thres_crossCategory']
  646. else:ovlap_thres_crossCategory = None
  647. ####模型选择参数用如下:
  648. mode_paras=par['detModelpara']
  649. allowedList,allowedList_string=get_needed_objectsIndex(mode_paras)
  650. slopeIndex = par['slopeIndex']
  651. ##只加载检测模型,准备好显示字符
  652. names=get_labelnames(labelnames)
  653. #imageW=4915;###默认是1920,在森林巡检的高清图像中是4920
  654. outfontsize=int(imageW/1920*40);###
  655. label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
  656. segmodel = None
  657. ##图像测试
  658. #url='images/examples/20220624_响水河_12300_1621.jpg'
  659. impth = par['testImgPath']
  660. outpth = par['testOutPath']
  661. imgpaths=[]###获取文件里所有的图像
  662. for postfix in ['.jpg','.JPG','.PNG','.png']:
  663. imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
  664. videopaths=[]###获取文件里所有的视频
  665. for postfix in ['.MP4','.mp4','.avi']:
  666. videopaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
  667. imgpaths.sort()
  668. for i in range(len(imgpaths)):
  669. #for i in range(2):
  670. #imgpath = os.path.join(impth, folders[i])
  671. imgpath = imgpaths[i]
  672. bname = os.path.basename(imgpath )
  673. im0s=[cv2.imread(imgpath)]
  674. time00 = time.time()
  675. #使用不同的函数。每一个领域采用一个函数
  676. p_result,timeOut = AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half,device,conf_thres, iou_thres,allowedList,font=digitFont,trtFlag_det=trtFlag_det,SecNms=ovlap_thres_crossCategory)
  677. time11 = time.time()
  678. image_array = p_result[1]
  679. cv2.imwrite( os.path.join( outpth,bname ) ,image_array )
  680. print('----image:%s, process:%.1f ,save:%.1f, %s'%(bname,(time11-time00) * 1000, (time.time() - time11) * 1000,timeOut ) )
  681. ##process video
  682. print('##begin to process videos, total %d videos'%( len(videopaths)))
  683. for i,video in enumerate(videopaths):
  684. print('process video%d :%s '%(i,video))
  685. par0={'model':model,'segmodel':segmodel, 'names':names,'label_arraylist':label_arraylist,'rainbows':rainbows,'outpth':par['testOutPath'],
  686. 'half':half,'device':device,'conf_thres':conf_thres, 'iou_thres':iou_thres,'allowedList':allowedList,'digitFont':digitFont,'trtFlag_det': trtFlag_det
  687. }
  688. process_video(video,par0,mode='det')
  689. def OCR_demo2(opt):
  690. from ocrUtils2 import crnn_model
  691. from ocrUtils2.ocrUtils import get_cfg,recognition_ocr,strLabelConverter
  692. if opt['business'] == 'ocr2':
  693. par={
  694. 'image_dir':'images/ocr_en',
  695. 'outtxt':'images/results',
  696. 'weights':'../AIlib2/weights/conf/ocr2/crnn_448X32.pth',
  697. #'weights':'../weights/2080Ti/AIlib2/ocr2/crnn_2080Ti_fp16_448X32.engine',
  698. 'device':'cuda:0',
  699. 'cfg':'../AIlib2/weights/conf/ocr2/360CC_config.yaml',
  700. 'char_file':'../AIlib2/weights/conf/ocr2/chars.txt',
  701. 'imgH':32,
  702. 'imgW':448,
  703. 'workers':1
  704. }
  705. image_dir=par['image_dir']
  706. outtxt=par['outtxt']
  707. workers=par['workers']
  708. weights= par['weights']
  709. device=par['device']
  710. char_file=par['char_file']
  711. imgH=par['imgH']
  712. imgW=par['imgW']
  713. cfg = par['cfg']
  714. config = get_cfg(cfg, char_file)
  715. par['contextFlag']=False
  716. device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
  717. if weights.endswith('.pth'):
  718. model = crnn_model.get_crnn(config,weights=weights).to(device)
  719. par['model_mode']='pth'
  720. else:
  721. logger = trt.Logger(trt.Logger.ERROR)
  722. with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
  723. model = runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象
  724. print('#####load TRT file:',weights,'success #####')
  725. context = model.create_execution_context()
  726. par['model_mode']='trt';par['contextFlag']=context
  727. converter = strLabelConverter(config.DATASET.ALPHABETS)
  728. img_urls=glob.glob('%s/*.jpg'%( image_dir ))
  729. img_urls.extend( glob.glob('%s/*.png'%( image_dir )) )
  730. cnt=len(img_urls)
  731. print('%s has %d images'%(image_dir ,len(img_urls) ) )
  732. # 准备数据
  733. parList=[]
  734. for i in range(cnt):
  735. img_patch=cv2.imread( img_urls[i] , cv2.IMREAD_GRAYSCALE)
  736. started = time.time()
  737. img = cv2.imread(img_urls[i])
  738. sim_pred = recognition_ocr(config, img, model, converter, device,par=par)
  739. finished = time.time()
  740. print('{0}: elapsed time: {1} prd:{2} '.format( os.path.basename( img_urls[i] ), finished - started, sim_pred ))
  741. def OBB_demo(opt):
  742. ###倾斜框(OBB)的ship目标检测
  743. par={
  744. 'model_size':(608,608), #width,height
  745. 'K':100, #Maximum of objects'
  746. 'conf_thresh':0.18,##Confidence threshold, 0.1 for general evaluation
  747. 'device':"cuda:0",
  748. 'down_ratio':4,'num_classes':15,
  749. #'weights':'../AIlib2/weights/conf/ship2/obb_608X608.engine',
  750. 'weights':'../weights/%s/AIlib2/%s/obb_608X608_%s_fp16.engine'%(opt['gpu'],opt['business'],opt['gpu']),
  751. 'dataset':'dota',
  752. 'test_dir': 'images/ship/',
  753. 'result_dir': 'images/results',
  754. 'half': False,
  755. 'mean':(0.5, 0.5, 0.5),
  756. 'std':(1, 1, 1),
  757. 'model_size':(608,608),##width,height
  758. 'heads': {'hm': None,'wh': 10,'reg': 2,'cls_theta': 1},
  759. 'decoder':None,
  760. 'test_flag':True,
  761. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
  762. 'drawBox':False,#####是否画框
  763. 'digitWordFont': { 'line_thickness':2,'boxLine_thickness':1,'wordSize':40, 'fontSize':1.0,'label_location':'leftTop'},
  764. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business'] ), ###检测类别对照表
  765. }
  766. ####加载模型
  767. model,decoder2=load_model_decoder_OBB(par)
  768. par['decoder']=decoder2
  769. names=get_labelnames(par['labelnames']);par['labelnames']=names
  770. conf_thres,iou_thres,classes,rainbows=get_postProcess_para(par['postFile']);par['rainbows']=rainbows
  771. label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['digitWordFont']['wordSize'],fontpath="../AIlib2/conf/platech.ttf")
  772. par['label_array']=label_arraylist
  773. img_urls=glob.glob('%s/*'%( par['test_dir'] ))
  774. for img_url in img_urls:
  775. #print(img_url)
  776. ori_image=cv2.imread(img_url)
  777. ori_image_list,infos = OBB_infer(model,ori_image,par)
  778. ori_image_list[1] = draw_obb(ori_image_list[2] ,ori_image_list[1],par)
  779. imgName = os.path.basename(img_url)
  780. saveFile = os.path.join(par['result_dir'], imgName)
  781. ret=cv2.imwrite(saveFile, ori_image_list[1] )
  782. if not ret:
  783. print(saveFile, ' not created ')
  784. print( os.path.basename(img_url),':',infos)
  785. def jkm_demo():
  786. from utilsK.jkmUtils import pre_process,post_process,get_return_data
  787. img_type = 'plate' ## code,plate
  788. par={'code':{'weights':'../AIlib2/weights/jkm/health_yolov5s_v3.jit','img_type':'code','nc':10 },
  789. 'plate':{'weights':'../AIlib2/weights/jkm/plate_yolov5s_v3.jit','img_type':'plate','nc':1 },
  790. 'conf_thres': 0.4,
  791. 'iou_thres':0.45,
  792. 'device':'cuda:0',
  793. 'plate_dilate':(0.5,0.1)
  794. }
  795. ###加载模型
  796. device = torch.device(par['device'])
  797. jit_weights = par['code']['weights']
  798. model = torch.jit.load(jit_weights)
  799. jit_weights = par['plate']['weights']
  800. model_plate = torch.jit.load(jit_weights)
  801. imgd='images/plate'
  802. imgpaths = os.listdir(imgd)
  803. for imgp in imgpaths[0:]:
  804. #imgp = 'plate_IMG_20221030_100612.jpg'
  805. imgpath = os.path.join(imgd,imgp)
  806. im0 = cv2.imread(imgpath) #读取数据
  807. img ,padInfos = pre_process(im0,device) ##预处理
  808. if img_type=='code': pred = model(img) ##模型推理
  809. else: pred = model_plate(img)
  810. boxes = post_process(pred,padInfos,device,conf_thres= par['conf_thres'], iou_thres= par['iou_thres'],nc=par[img_type]['nc']) #后处理
  811. dataBack=get_return_data(im0,boxes,modelType=img_type,plate_dilate=par['plate_dilate'])
  812. print(imgp,boxes,dataBack['type'])
  813. for key in dataBack.keys():
  814. if isinstance(dataBack[key],list):
  815. cv2.imwrite( 'images/results/%s_%s.jpg'%( imgp.replace('.jpg','').replace('.png',''),key),dataBack[key][0] ) ###返回值: dataBack
  816. def crowd_demo(opt):
  817. if opt['business']=='crowdCounting':
  818. from crowd import crowdModel as Model
  819. par={
  820. 'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225],'threshold':0.5,
  821. 'input_profile_shapes':[(1,3,256,256),(1,3,1024,1024),(1,3,2048,2048)],
  822. 'modelPar':{'backbone':'vgg16_bn', 'gpu_id':0,'anchorFlag':False, 'width':None,'height':None ,'line':2, 'row':2},
  823. 'weights':"../weights/%s/AIlib2/%s/crowdCounting_%s_dynamic.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  824. 'testImgPath':'images/%s'%(opt['business'] ),###测试图像的位置
  825. 'testOutPath':'images/results/',###输出测试图像位置
  826. }
  827. #weights='weights/best_mae.pth'
  828. cmodel = Model(par['weights'],par)
  829. img_path = par['testImgPath']
  830. File = os.listdir(img_path)
  831. targetList = []
  832. for file in File[0:]:
  833. COORlist = []
  834. imgPath = img_path + os.sep + file
  835. img_raw = cv2.cvtColor(cv2.imread(imgPath),cv2.COLOR_BGR2RGB)
  836. # cmodel.eval---
  837. # 输入读取的RGB数组
  838. # 输出:list,0--原图,1-人头坐标list,2-对接OBB的格式数据,其中4个坐标均相同,2-格式如下:
  839. # [ [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score, cls ], [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score ,cls ],........ ]
  840. prets, infos = cmodel.eval(img_raw)
  841. print(file,infos,' 人数:',len(prets[1]))
  842. img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR)
  843. # 打印预测图像中人头的个数
  844. for p in prets[1]:
  845. img_to_draw = cv2.circle(img_to_draw, (int(p[0]), int(p[1])), 2, (0, 255, 0), -1)
  846. COORlist.append((int(p[0]), int(p[1])))
  847. # 将各测试图像中的人头坐标存储在targetList中, 格式:[[(x1, y1),(x2, y2),...], [(X1, Y1),(X2, Y2),..], ...]
  848. targetList.append(COORlist)
  849. #time.sleep(2)
  850. # 保存预测图片
  851. cv2.imwrite(os.path.join(par['testOutPath'], file), img_to_draw)
  852. if __name__=="__main__":
  853. #jkm_demo()
  854. businessAll=['river2','AnglerSwimmer', 'countryRoad','forest2', 'pedestrian' , 'smogfire' , 'vehicle','ship2',"highWay2","channelEmergency","cityMangement","drowning","noParking","illParking",'cityMangement2',"cityRoad","crowdCounting"]
  855. businessAll = ['crowdCounting']
  856. # forest 、 ocr2 、ocr_en 、 river 、 road 、 ship ,目前都没有在用
  857. for busi in businessAll:
  858. print('-'*40,'beg to test ',busi,'-'*40)
  859. opt={'gpu':'2080Ti','business':busi}
  860. if opt['business'] in ['highWay2','river2','drowning','noParking','river',"illParking","cityMangement2"]:
  861. detSeg_demo(opt)
  862. elif opt['business'] in ['crowdCounting'] :
  863. crowd_demo(opt)
  864. elif opt['business'] in ['ship2']:
  865. OBB_demo(opt)
  866. elif opt['business'] in ['ocr']:
  867. OCR_demo(opt)
  868. elif opt['business'] in ['ocr2']:
  869. OCR_demo2(opt)
  870. elif opt['business'] in ['riverTrack','highWay2Track']:
  871. det_track_demo(opt )
  872. else:
  873. det_demo( opt )