Nelze vybrat více než 25 témat Téma musí začínat písmenem nebo číslem, může obsahovat pomlčky („-“) a může být dlouhé až 35 znaků.

962 lines
54KB

  1. import sys, yaml
  2. from easydict import EasyDict as edict
  3. from concurrent.futures import ThreadPoolExecutor
  4. sys.path.extend(['..','../AIlib2' ])
  5. from AI import AI_process,AI_process_forest,get_postProcess_para,get_postProcess_para_dic,ocr_process,AI_det_track,AI_det_track_batch
  6. import cv2,os,time
  7. from segutils.segmodel import SegModel
  8. from stdc import stdcModel
  9. from segutils.trafficUtils import tracfficAccidentMixFunction
  10. from models.experimental import attempt_load
  11. from utils.torch_utils import select_device
  12. from utilsK.queRiver import get_labelnames,get_label_arrays,save_problem_images,riverDetSegMixProcess
  13. from ocrUtils.ocrUtils import CTCLabelConverter,AlignCollate
  14. from trackUtils.sort import Sort,track_draw_boxAndTrace,track_draw_trace_boxes,moving_average_wang,drawBoxTraceSimplied
  15. from trackUtils.sort_obb import OBB_Sort,obbTohbb,track_draw_all_boxes,track_draw_trace
  16. from obbUtils.shipUtils import OBB_infer,OBB_tracker,draw_obb,OBB_tracker_batch
  17. from utilsK.noParkingUtils import mixNoParking_road_postprocess
  18. from obbUtils.load_obb_model import load_model_decoder_OBB
  19. import numpy as np
  20. import torch,glob
  21. import tensorrt as trt
  22. from utilsK.masterUtils import get_needed_objectsIndex
  23. from copy import deepcopy
  24. from scipy import interpolate
  25. from utilsK.drownUtils import mixDrowing_water_postprocess
  26. #import warnings
  27. #warnings.filterwarnings("error")
  28. def view_bar(num, total,time1,prefix='prefix'):
  29. rate = num / total
  30. time_n=time.time()
  31. rate_num = int(rate * 30)
  32. rate_nums = np.round(rate * 100)
  33. r = '\r %s %d / %d [%s%s] %.2f s'%(prefix,num,total, ">" * rate_num, " " * (30 - rate_num), time_n-time1 )
  34. sys.stdout.write(r)
  35. sys.stdout.flush()
  36. '''
  37. 多线程
  38. '''
def process_v1(frame):
    """Run one AI inference job on a packed argument tuple and persist its outputs.

    `frame` is a positional tuple assembled by the caller (layout inferred from the
    indexing below -- confirm against the call site):
      frame[0]  -- list containing one input image (H x W x C ndarray)
      frame[1..5] -- model, seg model, names, label arrays, colors (forwarded to AI_process)
      frame[6]  -- objectPar dict,  frame[7] -- font settings
      frame[8]  -- image file name used for all output paths
      frame[9]  -- segPar dict,  frame[10] -- mode,  frame[11] -- postPar

    Side effects: writes the annotated image (and, when present, a '_mask.png')
    plus a one-box-per-line text file under 'images/results/'.

    Returns the string 'success'.
    """
    #try:
    print('demo.py beging to :',frame[8])
    time00 = time.time()
    H,W,C = frame[0][0].shape
    p_result,timeOut = AI_process(frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],objectPar=frame[6],font=frame[7],segPar=frame[9],mode=frame[10],postPar=frame[11])
    time11 = time.time()
    image_array = p_result[1]  # annotated image returned by AI_process
    cv2.imwrite(os.path.join('images/results/',frame[8] ) ,image_array)
    bname = frame[8].split('.')[0]
    if len(p_result)==5:
        # a 5-element result additionally carries a segmentation mask at index 4
        image_mask = p_result[4]
        cv2.imwrite(os.path.join('images/results/',bname+'_mask.png' ) , (image_mask).astype(np.uint8))
    boxes=p_result[2]
    # dump detections, one comma-separated box per line
    with open( os.path.join('images/results/',bname+'.txt' ),'w' ) as fp:
        for box in boxes:
            box_str=[str(x) for x in box]
            out_str=','.join(box_str)+'\n'
            fp.write(out_str)
    time22 = time.time()
    # timing summary: inference vs. save time, both in milliseconds
    print('%s,%d*%d,AI-process: %.1f,image save:%.1f , %s'%(frame[8],H,W, (time11 - time00) * 1000.0, (time22-time11)*1000.0,timeOut), boxes)
    return 'success'
    #except Exception as e:
    #    return 'failed:'+str(e)
def process_video(video,par0,mode='detSeg'):
    """Run an AI pipeline over every frame of a video and write an annotated copy.

    Args:
        video: path or URL accepted by cv2.VideoCapture.
        par0:  dict of models and settings; which keys are required depends on
               `mode` ('outpth' and 'trackPar' are always read; per-mode keys
               include 'model', 'segmodel', 'names', 'label_arraylist',
               'rainbows', 'objectPar', 'digitFont', 'segPar', 'modelPar',
               'processPar', 'sort_tracker', 'drawPar', 'obbModelPar', 'half',
               'device', 'conf_thres', 'iou_thres', 'allowedList', 'trtFlag_det').
        mode:  'detSeg' -- per-frame detection + segmentation;
               'track' -- batched detection + SORT tracking;
               'obbTrack' -- batched oriented-bounding-box tracking;
               anything else -- per-frame forest detection pipeline.

    Returns False when the video cannot be opened; otherwise None after the
    annotated video has been written under par0['outpth'].
    """
    cap=cv2.VideoCapture(video)
    if not cap.isOpened():
        print('#####error url:',video)
        return False
    bname=os.path.basename(video).split('.')[0]
    fps = int(cap.get(cv2.CAP_PROP_FPS)+0.5)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH )+0.5)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)+0.5)
    framecnt=int(cap.get(7)+0.5)  # property 7 == cv2.CAP_PROP_FRAME_COUNT
    save_path_AI = os.path.join(par0['outpth'],os.path.basename(video))
    problem_image_dir= os.path.join( par0['outpth'], 'probleImages' )
    os.makedirs(problem_image_dir,exist_ok=True)
    vid_writer_AI = cv2.VideoWriter(save_path_AI, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width,height))
    num=0
    iframe=0;post_results=[];fpsample=30*10
    imgarray_list = []; iframe_list = []
    patch_cnt = par0['trackPar']['patchCnt']  # batch size for the tracking modes
    ## windowsize smooths the frame-interpolated results (windowsize = smoothing length);
    ## a detect/track pass runs every det_cnt frames.
    trackPar={'det_cnt':10,'windowsize':29 }  # NOTE(review): local and unused here -- the tracking calls below use par0['trackPar']
    ##track_det_result_update= np.empty((0,8)) ### results of each 100-frame batch would go here, keeping only tracker ids present in the current batch
    while cap.isOpened():
        ret, imgarray = cap.read() # read one frame from the capture
        iframe +=1
        if not ret:break
        if mode=='detSeg':
            # joint detection + segmentation on the single frame
            p_result,timeOut = AI_process([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],objectPar=par0['objectPar'],font=par0['digitFont'],segPar=par0['segPar'])
        elif mode == 'track':
            #sampleCount=10
            # buffer frames until a full batch of patch_cnt frames is collected
            imgarray_list.append( imgarray )
            iframe_list.append(iframe )
            if iframe%patch_cnt==0:
                time_patch0 = time.time()
                retResults,timeInfos = AI_det_track_batch(imgarray_list, iframe_list ,par0['modelPar'],par0['processPar'],par0['sort_tracker'] ,par0['trackPar'],segPar=par0['segPar'])
                #print('###line111:',retResults[2])
                ### results need to be a 2-D list, one sub-list of detections per frame.
                ### track_det_result row format: x1, y1, x2, y2, conf, cls, iframe, trackId
                time_patch2 = time.time()
                frame_min = iframe_list[0];frame_max=iframe_list[-1]
                for iiframe in range(frame_min,frame_max+1):
                    img_draw = imgarray_list[ iiframe- frame_min ]
                    img_draw = drawBoxTraceSimplied(retResults[1] ,iiframe, img_draw,rainbows=par0['drawPar']['rainbows'],boxFlag=True,traceFlag=True,names=par0['drawPar']['names'] )
                    ret = vid_writer_AI.write(img_draw)
                    view_bar(iiframe, framecnt,time.time(),prefix=os.path.basename(video))
                imgarray_list=[];iframe_list=[]
        elif mode =='obbTrack':
            # oriented-bounding-box tracking, also batched per patch_cnt frames
            imgarray_list.append( imgarray )
            iframe_list.append(iframe )
            if iframe%patch_cnt==0:
                time_patch0 = time.time()
                track_det_results, timeInfos = OBB_tracker_batch(imgarray_list,iframe_list,par0['modelPar'],par0['obbModelPar'],par0['sort_tracker'],par0['trackPar'],segPar=None)
                print( timeInfos )
                # draw the batch results onto each frame
                track_det_np = track_det_results[1]
                frame_min = iframe_list[0];frame_max=iframe_list[-1]
                for iiframe in range(frame_min,frame_max+1):
                    img_draw = imgarray_list[ iiframe- frame_min ]
                    if len( track_det_results[2][ iiframe- frame_min]) > 0:
                        img_draw = draw_obb( track_det_results[2][iiframe- frame_min ] ,img_draw,par0['drawPar'])
                    if True:
                        # columns 12 and 13 of track_det_np hold the frame index and track id
                        frameIdex=12;trackIdex=13;
                        boxes_oneFrame = track_det_np[ track_det_np[:,frameIdex]==iiframe ]
                        ### draw the accumulated trajectory of every id visible in this frame
                        track_ids = boxes_oneFrame[:,trackIdex].tolist()
                        boxes_before_oneFrame = track_det_np[ track_det_np[:,frameIdex]<=iiframe ]
                        for trackId in track_ids:
                            boxes_before_oneFrame_oneId = boxes_before_oneFrame[boxes_before_oneFrame[:,trackIdex]==trackId]
                            # columns 8/9 appear to be the box centre (xc, yc) -- confirm against OBB_tracker_batch
                            xcs = boxes_before_oneFrame_oneId[:,8]
                            ycs = boxes_before_oneFrame_oneId[:,9]
                            [cv2.line(img_draw, ( int(xcs[i]) , int(ycs[i]) ),
                                      ( int(xcs[i+1]),int(ycs[i+1]) ),(255,0,0), thickness=2)
                             for i,_ in enumerate(xcs) if i < len(xcs)-1 ]
                    ret = vid_writer_AI.write(img_draw)
                #sys.exit(0)
                #print('vide writer ret:',ret)
                imgarray_list=[];iframe_list=[]
                view_bar(iframe, framecnt,time.time(),prefix=os.path.basename(video))
        else:
            # plain detection (forest pipeline), no segmentation
            p_result,timeOut = AI_process_forest([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],par0['half'],par0['device'],par0['conf_thres'], par0['iou_thres'],par0['allowedList'],font=par0['digitFont'],trtFlag_det=par0['trtFlag_det'])
        if mode not in [ 'track','obbTrack']:
            # per-frame modes write the annotated frame immediately
            image_array = p_result[1];num+=1
            ret = vid_writer_AI.write(image_array)
            view_bar(num, framecnt,time.time(),prefix=os.path.basename(video))
            ## every fpsample frames, flush accumulated problem detections out as images
            if (iframe % fpsample == 0) and (len(post_results)>0) :
                parImage=save_problem_images(post_results,iframe,par0['names'],streamName=bname,outImaDir=problem_image_dir,imageTxtFile=False)
                post_results=[]
            if len(p_result[2] )>0:
                post_results.append(p_result)
    vid_writer_AI.release();
  153. def det_track_demo(business ):
  154. '''
  155. 跟踪参数说明:
  156. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}
  157. sort_max_age--跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。
  158. sort_min_hits--每隔目标连续出现的次数,超过这个次数才认为是一个目标。
  159. sort_iou_thresh--检测最小的置信度。
  160. det_cnt--每隔几次做一个跟踪和检测,默认10。
  161. windowsize--轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。
  162. patchCnt--每次送入图像的数量,不宜少于100帧。
  163. '''
  164. ''' 以下是基于检测和分割的跟踪模型,分割用来修正检测的结果'''
  165. ####河道巡检的跟踪模型参数
  166. if opt['business'] == 'river' or opt['business'] == 'river2' :
  167. par={
  168. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  169. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  170. 'gpuname':'2080Ti',###显卡名称
  171. 'max_workers':1, ###并行线程数
  172. 'half':True,
  173. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  174. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  175. 'seg_nclass':2,###分割模型类别数目,默认2类
  176. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  177. 'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,#分割模型预处理参数
  178. 'mixFunction':{'function':riverDetSegMixProcess,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}} #分割和检测混合处理的函数
  179. },
  180. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  181. 'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
  182. 'postFile': '../AIlib2/weights/conf/%s/para.json'%( opt['business'] ),###后处理参数文件
  183. 'txtFontSize':80,###文本字符的大小
  184. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  185. #'testImgPath':'images/videos/river',###测试图像的位置
  186. 'testImgPath':'images/tt',###测试图像的位置
  187. 'testOutPath':'images/results/',###输出测试图像位置
  188. }
  189. if opt['business'] == 'highWay2':
  190. par={
  191. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  192. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%( opt['business'] ), ###检测类别对照表
  193. 'half':True,
  194. 'gpuname':'3090',###显卡名称
  195. 'max_workers':1, ###并行线程数
  196. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  197. #'Detweights':"../AIlib2/weights/conf/highWay2/yolov5.pt",
  198. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
  199. 'seg_nclass':3,###分割模型类别数目,默认2类
  200. 'segRegionCnt':2,###分割模型结果需要保留的等值线数目
  201. 'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数
  202. 'mixFunction':{'function':tracfficAccidentMixFunction,
  203. 'pars':{ 'RoadArea': 16000, 'vehicleArea': 10, 'roadVehicleAngle': 15, 'speedRoadVehicleAngleMax': 75,'radius': 50 , 'roundness': 1.0, 'cls': 9, 'vehicleFactor': 0.1,'cls':9, 'confThres':0.25,'roadIou':0.6,'vehicleFlag':False,'distanceFlag': False }
  204. }
  205. },
  206. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':5,'windowsize':29,'patchCnt':100},
  207. 'mode':'highWay3.0',
  208. 'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
  209. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
  210. 'txtFontSize':20,###文本字符的大小
  211. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':0.5,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
  212. #'testImgPath':'images/trafficAccident/8.png',###测试图像的位置
  213. 'testImgPath':'/home/chenyukun/777-7-42.mp4',###测试图像的位置
  214. 'testOutPath':'images/results/',###输出测试图像位置
  215. }
  216. par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
  217. if opt['business'] == 'noParking':
  218. par={
  219. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  220. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%( opt['business'] ), ###检测类别对照表
  221. 'half':True,
  222. 'gpuname':'3090',###显卡名称
  223. 'max_workers':1, ###并行线程数
  224. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  225. #'Detweights':"../AIlib2/weights/conf/highWay2/yolov5.pt",
  226. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
  227. 'seg_nclass':4,###分割模型类别数目,默认2类
  228. 'segRegionCnt':2,###分割模型结果需要保留的等值线数目
  229. 'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数
  230. 'mixFunction':{'function':mixNoParking_road_postprocess,
  231. 'pars': { 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5 ,'RoadArea': 16000,'fitOrder':2}
  232. }
  233. },
  234. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  235. 'mode':'highWay3.0',
  236. 'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
  237. 'postFile': '../AIlib2/weights/conf/%s/para.json'%('highWay2' ),###后处理参数文件
  238. 'txtFontSize':20,###文本字符的大小
  239. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
  240. 'testImgPath':'images/noParking/',###测试图像的位置
  241. 'testOutPath':'images/results/',###输出测试图像位置
  242. }
  243. par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
  244. if opt['business'] == 'cityMangement2':
  245. from DMPR import DMPRModel
  246. from DMPRUtils.jointUtil import dmpr_yolo
  247. par={
  248. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  249. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  250. 'max_workers':1, ###并行线程数
  251. 'half':True,
  252. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  253. #'Detweights':"/mnt/thsw2/DSP2/weights/cityMangement2/weights/urbanManagement/yolo/best.pt",
  254. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
  255. 'seg_nclass':4,###分割模型类别数目,默认2类
  256. 'segRegionCnt':2,###分割模型结果需要保留的等值线数目
  257. 'segPar':{ 'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.3, 'dmprimg_size':640,
  258. 'mixFunction':{'function':dmpr_yolo,
  259. 'pars':{'carCls':0 ,'illCls':3,'scaleRatio':0.5,'border':80}
  260. }
  261. },
  262. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  263. #'Segweights' : '/mnt/thsw2/DSP2/weights/cityMangement2/weights/urbanManagement/DMPR/dp_detector_499.engine',###分割模型权重位置
  264. 'Segweights':"../weights/%s/AIlib2/%s/dmpr_%s.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  265. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
  266. 'txtFontSize':20,###文本字符的大小
  267. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
  268. #'testImgPath':'/mnt/thsw2/DSP2/demoImages/illParking',###测试图像的位置
  269. 'testImgPath':'/mnt/thsw2/DSP2/weights/cityMangement2_0916/images/input',
  270. #'testImgPath':'images/cityMangement/',
  271. 'testOutPath':'images/results/',###输出测试图像位置
  272. }
  273. if opt['business'] == 'drowning':
  274. par={
  275. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  276. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%( opt['business'] ), ###检测类别对照表
  277. 'half':True,
  278. 'gpuname':'3090',###显卡名称
  279. 'max_workers':1, ###并行线程数
  280. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  281. #'Detweights':"../AIlib2/weights/conf/highWay2/yolov5.pt",
  282. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
  283. 'seg_nclass':2,###分割模型类别数目,默认2类
  284. 'segRegionCnt':2,###分割模型结果需要保留的等值线数目
  285. 'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数
  286. 'mixFunction':{'function':mixDrowing_water_postprocess,
  287. 'pars':{ }
  288. }
  289. },
  290. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  291. 'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
  292. 'postFile': '../AIlib2/weights/conf/%s/para.json'%('highWay2' ),###后处理参数文件
  293. 'txtFontSize':20,###文本字符的大小
  294. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
  295. 'testImgPath':'images/drowning/',###测试图像的位置
  296. 'testOutPath':'images/results/',###输出测试图像位置
  297. }
  298. par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
  299. ''' 以下是基于检测的跟踪模型,只有检测没有分割 '''
  300. if opt['business'] == 'forest2':
  301. par={
  302. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  303. 'labelnames':"../AIlib2/weights/conf/forest2/labelnames.json", ###检测类别对照表
  304. 'gpuname':opt['gpu'],###显卡名称
  305. 'max_workers':1, ###并行线程数
  306. 'half':True,
  307. 'trtFlag_det':True,###检测模型是否采用TRT
  308. 'trtFlag_seg':False,###分割模型是否采用TRT
  309. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  310. #'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  311. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],
  312. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  313. 'seg_nclass':2,###分割模型类别数目,默认2类
  314. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  315. 'segPar':None,###分割模型预处理参数
  316. 'Segweights' : None,###分割模型权重位置
  317. 'postFile': '../AIlib2/weights/conf/forest/para.json',###后处理参数文件
  318. 'txtFontSize':80,###文本字符的大小
  319. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  320. 'testImgPath':'../AIdemo2/images/forest2/',###测试图像的位置
  321. 'testOutPath':'images/results/',###输出测试图像位置
  322. }
  323. ###车辆巡检参数
  324. if opt['business'] == 'vehicle':
  325. par={
  326. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  327. 'labelnames':"../AIlib2/weights/conf/vehicle/labelnames.json", ###检测类别对照表
  328. 'gpuname':'2080T',###显卡名称
  329. 'half':True,
  330. 'max_workers':1, ###并行线程数
  331. 'trtFlag_det':True,###检测模型是否采用TRT
  332. 'trtFlag_seg':False,###分割模型是否采用TRT
  333. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  334. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  335. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  336. 'seg_nclass':2,###分割模型类别数目,默认2类
  337. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  338. 'segPar':None,###分割模型预处理参数
  339. 'Segweights' : None,###分割模型权重位置
  340. 'postFile': '../AIlib2/weights/conf/vehicle/para.json',###后处理参数文件
  341. 'txtFontSize':40,###文本字符的大小
  342. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  343. 'testImgPath':'images/videos/vehicle/',###测试图像的位置
  344. 'testOutPath':'images/results/',###输出测试图像位置
  345. }
  346. ###行人检测模型
  347. if opt['business'] == 'pedestrian':
  348. par={
  349. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  350. 'labelnames':"../AIlib2/weights/conf/pedestrian/labelnames.json", ###检测类别对照表
  351. 'gpuname':'2080T',###显卡名称
  352. 'half':True,
  353. 'max_workers':1, ###并行线程数
  354. 'trtFlag_det':True,###检测模型是否采用TRT
  355. 'trtFlag_seg':False,###分割模型是否采用TRT
  356. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  357. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  358. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  359. 'seg_nclass':2,###分割模型类别数目,默认2类
  360. 'segRegionCnt':0,###分割模型结果需要保留的等值线数目
  361. 'segPar':None,###分割模型预处理参数
  362. 'Segweights' : None,###分割模型权重位置
  363. 'postFile': '../AIlib2/weights/conf/pedestrian/para.json',###后处理参数文件
  364. 'txtFontSize':40,###文本字符的大小
  365. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  366. 'testImgPath':'../AIdemo2/images/pedestrian/',###测试图像的位置
  367. 'testOutPath':'images/results/',###输出测试图像位置
  368. }
  369. if opt['business'] == 'smogfire':
  370. par={
  371. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  372. 'labelnames':"../AIlib2/weights/conf/smogfire/labelnames.json", ###检测类别对照表
  373. 'gpuname':'2080T',###显卡名称
  374. 'half':True,
  375. 'max_workers':1, ###并行线程数
  376. 'trtFlag_det':True,###检测模型是否采用TRT
  377. 'trtFlag_seg':False,###分割模型是否采用TRT
  378. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  379. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  380. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  381. 'seg_nclass':2,###没有分割模型,此处不用
  382. 'segRegionCnt':0,###没有分割模型,此处不用
  383. 'segPar':None,###分割模型预处理参数
  384. 'Segweights' : None,###分割模型权重位置
  385. 'postFile': '../AIlib2/weights/conf/smogfire/para.json',###后处理参数文件
  386. 'txtFontSize':40,###文本字符的大小
  387. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  388. 'testImgPath':'../AIdemo2/images/smogfire/',###测试图像的位置
  389. 'testOutPath':'images/results/',###输出测试图像位置
  390. }
  391. ###钓鱼游泳检测
  392. if opt['business'] == 'AnglerSwimmer':
  393. par={
  394. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  395. 'labelnames':"../AIlib2/weights/conf/AnglerSwimmer/labelnames.json", ###检测类别对照表
  396. 'gpuname':'2080T',###显卡名称
  397. 'half':True,
  398. 'max_workers':1, ###并行线程数
  399. 'trtFlag_det':True,###检测模型是否采用TRT
  400. 'trtFlag_seg':False,###分割模型是否采用TRT
  401. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  402. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  403. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  404. 'seg_nclass':2,###没有分割模型,此处不用
  405. 'segRegionCnt':0,###没有分割模型,此处不用
  406. 'segPar':None,###分割模型预处理参数
  407. 'Segweights' : None,###分割模型权重位置
  408. 'postFile': '../AIlib2/weights/conf/AnglerSwimmer/para.json',###后处理参数文件
  409. 'txtFontSize':40,###文本字符的大小
  410. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  411. 'testImgPath':'../AIdemo2/images/AnglerSwimmer/',###测试图像的位置
  412. 'testOutPath':'images/results/',###输出测试图像位置
  413. }
  414. ###航道应急,做落水人员检测, channelEmergency
  415. if opt['business'] == 'channelEmergency':
  416. par={
  417. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  418. 'labelnames':"../AIlib2/weights/conf/channelEmergency/labelnames.json", ###检测类别对照表
  419. 'gpuname':'2080T',###显卡名称
  420. 'half':True,
  421. 'max_workers':1, ###并行线程数
  422. 'trtFlag_det':True,###检测模型是否采用TRT
  423. 'trtFlag_seg':False,###分割模型是否采用TRT
  424. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  425. #'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  426. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],###控制哪些检测类别显示、输出
  427. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  428. 'seg_nclass':2,###没有分割模型,此处不用
  429. 'segRegionCnt':0,###没有分割模型,此处不用
  430. 'segPar':None,###分割模型预处理参数
  431. 'Segweights' : None,###分割模型权重位置
  432. 'postFile': '../AIlib2/weights/conf/channelEmergency/para.json',###后处理参数文件
  433. 'txtFontSize':40,###文本字符的大小
  434. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  435. 'testImgPath':'../AIdemo2/images/channelEmergency/',###测试图像的位置
  436. 'testOutPath':'images/results/',###输出测试图像位置
  437. }
  438. ###乡村路违法种植
  439. if opt['business'] == 'countryRoad':
  440. par={
  441. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  442. 'labelnames':"../AIlib2/weights/conf/countryRoad/labelnames.json", ###检测类别对照表
  443. 'gpuname':'2080T',###显卡名称
  444. 'half':True,
  445. 'max_workers':1, ###并行线程数
  446. 'trtFlag_det':True,###检测模型是否采用TRT
  447. 'trtFlag_seg':False,###分割模型是否采用TRT
  448. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  449. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  450. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  451. 'seg_nclass':2,###没有分割模型,此处不用
  452. 'segRegionCnt':0,###没有分割模型,此处不用
  453. 'segPar':None,###分割模型预处理参数
  454. 'Segweights' : None,###分割模型权重位置
  455. 'postFile': '../AIlib2/weights/conf/countryRoad/para.json',###后处理参数文件
  456. 'txtFontSize':40,###文本字符的大小
  457. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  458. 'testImgPath':'../AIdemo2/images/countryRoad/',###测试图像的位置
  459. 'testOutPath':'images/results/',###输出测试图像位置
  460. }
  461. ###城管项目,检测城市垃圾和车辆
  462. if opt['business'] == 'cityMangement':
  463. par={
  464. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  465. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  466. 'gpuname':'2080Ti',###显卡名称
  467. 'half':True,
  468. 'max_workers':1, ###并行线程数
  469. 'trtFlag_det':True,###检测模型是否采用TRT
  470. 'trtFlag_seg':False,###分割模型是否采用TRT
  471. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  472. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  473. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  474. 'seg_nclass':2,###没有分割模型,此处不用
  475. 'segRegionCnt':0,###没有分割模型,此处不用
  476. 'segPar':None,###分割模型预处理参数
  477. 'Segweights' : None,###分割模型权重位置
  478. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business']),###后处理参数文件
  479. 'txtFontSize':40,###文本字符的大小
  480. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  481. 'testImgPath':'images/cityMangement',###测试图像的位置
  482. 'testOutPath':'images/results/',###输出测试图像位置
  483. }
  484. ###城管项目,检测道路情况,输入类别为五个:"护栏","交通标志","非交通标志","施工","施工“(第4,第5类别合并,名称相同)
  485. ###实际模型检测输出的类别为:"护栏","交通标志","非交通标志","锥桶","水马"
  486. if opt['business'] == 'cityRoad':
  487. par={
  488. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  489. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  490. 'gpuname':'2080Ti',###显卡名称
  491. 'half':True,
  492. 'max_workers':1, ###并行线程数
  493. 'trtFlag_det':True,###检测模型是否采用TRT
  494. 'trtFlag_seg':False,###分割模型是否采用TRT
  495. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  496. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
  497. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  498. 'seg_nclass':2,###没有分割模型,此处不用
  499. 'segRegionCnt':0,###没有分割模型,此处不用
  500. 'segPar':None,###分割模型预处理参数
  501. 'Segweights' : None,###分割模型权重位置
  502. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business']),###后处理参数文件
  503. 'txtFontSize':40,###文本字符的大小
  504. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
  505. 'testImgPath':'images/%s'%(opt['business']),###测试图像的位置
  506. 'testOutPath':'images/results/',###输出测试图像位置
  507. }
  508. if opt['business'] == 'illParking':
  509. from utilsK.illParkingUtils import illParking_postprocess
  510. par={
  511. 'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡)
  512. 'half':True,
  513. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
  514. 'max_workers':1, ###并行线程数
  515. 'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  516. 'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
  517. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  518. 'seg_nclass':4,###没有分割模型,此处不用
  519. 'segRegionCnt':2,###没有分割模型,此处不用
  520. 'segPar':{
  521. 'mixFunction':{'function':illParking_postprocess,
  522. 'pars':{ }
  523. }
  524. },
  525. 'Segweights' : None,###分割模型权重位置
  526. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
  527. 'txtFontSize':20,###文本字符的大小
  528. 'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':2},###显示框、线设置
  529. 'testImgPath':'images/cityMangement',###测试图像的位置
  530. 'testOutPath':'images/results/',###输出测试图像位置
  531. }
  532. par['trtFlag_det']=True if par['Detweights'].endswith('.engine') else False
  533. if par['Segweights']:
  534. par['segPar']['trtFlag_seg']=True if par['Segweights'].endswith('.engine') else False
  535. ##使用森林,道路模型,business 控制['forest','road']
  536. ##预先设置的参数
  537. #gpuname=par['gpuname']#如果用trt就需要此参数,只能是"3090" "2080Ti"
  538. device_=par['device'] ##选定模型,可选 cpu,'0','1'
  539. device = select_device(device_)
  540. half = device.type != 'cpu' # half precision only supported on CUDA
  541. trtFlag_det=par['trtFlag_det'] ###是否采用TRT模型加速
  542. ##以下参数目前不可改
  543. imageW=1080 ####道路模型
  544. digitFont= par['digitFont']
  545. ####加载检测模型
  546. if trtFlag_det:
  547. Detweights=par['Detweights']
  548. logger = trt.Logger(trt.Logger.ERROR)
  549. with open(Detweights, "rb") as f, trt.Runtime(logger) as runtime:
  550. model=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象
  551. print('####load TRT model :%s'%(Detweights))
  552. else:
  553. Detweights=par['Detweights']
  554. model = attempt_load(Detweights, map_location=device) # load FP32 model
  555. if half: model.half()
  556. ####加载分割模型
  557. seg_nclass = par['seg_nclass']
  558. segPar=par['segPar']
  559. if par['Segweights']:
  560. if opt['business'] == 'cityMangement2':
  561. segmodel = DMPRModel(weights=par['Segweights'], par = par['segPar'])
  562. else:
  563. segmodel = stdcModel(weights=par['Segweights'], par = par['segPar'])
  564. '''
  565. if par['segPar']['trtFlag_seg']:
  566. Segweights = par['Segweights']
  567. logger = trt.Logger(trt.Logger.ERROR)
  568. with open(Segweights, "rb") as f, trt.Runtime(logger) as runtime:
  569. segmodel=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象
  570. print('############locad seg model trt success: ',Segweights)
  571. else:
  572. Segweights = par['Segweights']
  573. segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)
  574. print('############locad seg model pth success:',Segweights)
  575. '''
  576. else:
  577. segmodel=None
  578. trackPar=par['trackPar']
  579. sort_tracker = Sort(max_age=trackPar['sort_max_age'],
  580. min_hits=trackPar['sort_min_hits'],
  581. iou_threshold=trackPar['sort_iou_thresh'])
  582. labelnames = par['labelnames']
  583. postFile= par['postFile']
  584. print( Detweights,labelnames )
  585. conf_thres,iou_thres,classes,rainbows=get_postProcess_para(postFile)
  586. detPostPar = get_postProcess_para_dic(postFile)
  587. conf_thres,iou_thres,classes,rainbows = detPostPar["conf_thres"],detPostPar["iou_thres"],detPostPar["classes"],detPostPar["rainbows"]
  588. if 'ovlap_thres_crossCategory' in detPostPar.keys(): iou2nd=detPostPar['ovlap_thres_crossCategory']
  589. else:iou2nd = None
  590. if 'score_byClass' in detPostPar.keys(): score_byClass=detPostPar['score_byClass']
  591. else: score_byClass = None
  592. ####模型选择参数用如下:
  593. mode_paras=par['detModelpara']
  594. allowedList,allowedList_string=get_needed_objectsIndex(mode_paras)
  595. #slopeIndex = par['slopeIndex']
  596. ##只加载检测模型,准备好显示字符
  597. names=get_labelnames(labelnames)
  598. #imageW=4915;###默认是1920,在森林巡检的高清图像中是4920
  599. outfontsize=int(imageW/1920*40);###
  600. label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
  601. ##图像测试和视频
  602. outpth = par['testOutPath']
  603. impth = par['testImgPath']
  604. imgpaths=[]###获取文件里所有的图像
  605. videopaths=[]###获取文件里所有的视频
  606. img_postfixs = ['.jpg','.JPG','.PNG','.png'];
  607. vides_postfixs= ['.MP4','.mp4','.avi']
  608. if os.path.isdir(impth):
  609. for postfix in img_postfixs:
  610. imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
  611. for postfix in ['.MP4','.mp4','.avi']:
  612. videopaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
  613. else:
  614. postfix = os.path.splitext(impth)[-1]
  615. if postfix in img_postfixs: imgpaths=[ impth ]
  616. if postfix in vides_postfixs: videopaths = [impth ]
  617. imgpaths.sort()
  618. modelPar={ 'det_Model': model,'seg_Model':segmodel }
  619. processPar={'half':par['half'],'device':device,'conf_thres':conf_thres,'iou_thres':iou_thres,'trtFlag_det':trtFlag_det,'iou2nd':iou2nd,'score_byClass':score_byClass}
  620. drawPar={'names':names,'label_arraylist':label_arraylist,'rainbows':rainbows,'font': par['digitFont'],'allowedList':allowedList}
  621. for i in range(len(imgpaths)):
  622. #for i in range(2):
  623. #imgpath = os.path.join(impth, folders[i])
  624. imgpath = imgpaths[i]
  625. bname = os.path.basename(imgpath )
  626. im0s=[cv2.imread(imgpath)]
  627. time00 = time.time()
  628. retResults,timeOut = AI_det_track_batch(im0s, [i] ,modelPar,processPar,sort_tracker ,trackPar,segPar)
  629. #print('###line627:',retResults[2])
  630. #retResults,timeInfos = AI_det_track_batch(imgarray_list, iframe_list ,par0['modelPar'],par0['processPar'],par0['sort_tracker'] ,par0['trackPar'],segPar=par0['segPar'])
  631. if len(retResults[1])>0:
  632. retResults[0][0] = drawBoxTraceSimplied(retResults[1],i, retResults[0][0],rainbows=rainbows,boxFlag=True,traceFlag=False,names=drawPar['names'])
  633. time11 = time.time()
  634. image_array = retResults[0][0]
  635. '''
  636. 返回值retResults[2] --list,其中每一个元素为一个list,表示每一帧的检测结果,每一个结果是由多个list构成,每个list表示一个框,格式为[ cls , x0 ,y0 ,x1 ,y1 ,conf,ifrmae,trackId ]
  637. --etc. retResults[2][j][k]表示第j帧的第k个框。
  638. '''
  639. cv2.imwrite( os.path.join( outpth,bname ) ,image_array )
  640. print('----image:%s, process:%s ( %s ),save:%s'%(bname,(time11-time00) * 1000, timeOut,(time.time() - time11) * 1000) )
  641. ##process video
  642. print('##begin to process videos, total %d videos'%( len(videopaths)))
  643. for i,video in enumerate(videopaths):
  644. print('process video%d :%s '%(i,video))
  645. par0={'modelPar':modelPar,'processPar':processPar,'drawPar':drawPar,'outpth':par['testOutPath'], 'sort_tracker':sort_tracker,'trackPar':trackPar,'segPar':segPar}
  646. process_video(video,par0,mode='track')
  647. def OCR_demo2(opt):
  648. from ocrUtils2 import crnn_model
  649. from ocrUtils2.ocrUtils import get_cfg,recognition_ocr,strLabelConverter
  650. if opt['business'] == 'ocr2':
  651. par={
  652. 'image_dir':'images/ocr_en',
  653. 'outtxt':'images/results',
  654. 'weights':'../AIlib2/weights/conf/ocr2/crnn_448X32.pth',
  655. #'weights':'../weights/2080Ti/AIlib2/ocr2/crnn_2080Ti_fp16_448X32.engine',
  656. 'device':'cuda:0',
  657. 'cfg':'../AIlib2/weights/conf/ocr2/360CC_config.yaml',
  658. 'char_file':'../AIlib2/weights/conf/ocr2/chars.txt',
  659. 'imgH':32,
  660. 'imgW':448,
  661. 'workers':1
  662. }
  663. image_dir=par['image_dir']
  664. outtxt=par['outtxt']
  665. workers=par['workers']
  666. weights= par['weights']
  667. device=par['device']
  668. char_file=par['char_file']
  669. imgH=par['imgH']
  670. imgW=par['imgW']
  671. cfg = par['cfg']
  672. config = get_cfg(cfg, char_file)
  673. par['contextFlag']=False
  674. device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
  675. if weights.endswith('.pth'):
  676. model = crnn_model.get_crnn(config,weights=weights).to(device)
  677. par['model_mode']='pth'
  678. else:
  679. logger = trt.Logger(trt.Logger.ERROR)
  680. with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
  681. model = runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象
  682. print('#####load TRT file:',weights,'success #####')
  683. context = model.create_execution_context()
  684. par['model_mode']='trt';par['contextFlag']=context
  685. converter = strLabelConverter(config.DATASET.ALPHABETS)
  686. img_urls=glob.glob('%s/*.jpg'%( image_dir ))
  687. img_urls.extend( glob.glob('%s/*.png'%( image_dir )) )
  688. cnt=len(img_urls)
  689. print('%s has %d images'%(image_dir ,len(img_urls) ) )
  690. # 准备数据
  691. parList=[]
  692. for i in range(cnt):
  693. img_patch=cv2.imread( img_urls[i] , cv2.IMREAD_GRAYSCALE)
  694. started = time.time()
  695. img = cv2.imread(img_urls[i])
  696. sim_pred = recognition_ocr(config, img, model, converter, device,par=par)
  697. finished = time.time()
  698. print('{0}: elapsed time: {1} prd:{2} '.format( os.path.basename( img_urls[i] ), finished - started, sim_pred ))
  699. def OBB_track_demo(opt):
  700. ###倾斜框(OBB)的ship目标检测
  701. par={
  702. 'obbModelPar':{
  703. 'model_size':(608,608),'K':100,'conf_thresh':0.3, 'down_ratio':4,'num_classes':15,'dataset':'dota',
  704. 'heads': {'hm': None,'wh': 10,'reg': 2,'cls_theta': 1},
  705. 'mean':(0.5, 0.5, 0.5),'std':(1, 1, 1), 'half': False,'decoder':None,
  706. 'weights':'../weights/%s/AIlib2/%s/obb_608X608_%s_fp16.engine'%(opt['gpu'],opt['business'],opt['gpu']),
  707. },
  708. 'outpth': 'images/results',
  709. 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
  710. 'device':"cuda:0",
  711. #'test_dir': '/mnt/thsw2/DSP2/videos/obbShips/DJI_20230208110806_0001_W_6M.MP4',
  712. 'test_dir':'/mnt/thsw2/DSP2/videos/obbShips/freighter2.mp4',
  713. 'test_flag':True,
  714. 'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
  715. 'drawBox':True,#####是否画框
  716. 'drawPar': { 'digitWordFont' :{'line_thickness':2,'boxLine_thickness':1,'wordSize':40, 'fontSize':1.0,'label_location':'leftTop'}} ,
  717. 'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business'] ), ###检测类别对照表
  718. }
  719. #par['model_size'],par['mean'],par['std'],par['half'],par['saveType'],par['heads'],par['labelnames'],par['decoder'],par['down_ratio'],par['drawBox']
  720. #par['rainbows'],par['label_array'],par['digitWordFont']
  721. obbModelPar = par['obbModelPar']
  722. ####加载模型
  723. model,decoder2=load_model_decoder_OBB(obbModelPar)
  724. obbModelPar['decoder']=decoder2
  725. names=get_labelnames(par['labelnames']);obbModelPar['labelnames']=names
  726. _,_,_,rainbows=get_postProcess_para(par['postFile']);par['drawPar']['rainbows']=rainbows
  727. label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['drawPar']['digitWordFont']['wordSize'],fontpath="../AIlib2/conf/platech.ttf")
  728. #par['label_array']=label_arraylist
  729. trackPar=par['trackPar']
  730. sort_tracker = OBB_Sort(max_age=trackPar['sort_max_age'],
  731. min_hits=trackPar['sort_min_hits'],
  732. iou_threshold=trackPar['sort_iou_thresh'])
  733. ##图像测试和视频
  734. impth = par['test_dir']
  735. img_urls=[]###获取文件里所有的图像
  736. video_urls=[]###获取文件里所有的视频
  737. img_postfixs = ['.jpg','.JPG','.PNG','.png'];
  738. vides_postfixs= ['.MP4','.mp4','.avi']
  739. if os.path.isdir(impth):
  740. for postfix in img_postfixs:
  741. img_urls.extend(glob.glob('%s/*%s'%(impth,postfix )) )
  742. for postfix in ['.MP4','.mp4','.avi']:
  743. video_urls.extend(glob.glob('%s/*%s'%(impth,postfix )) )
  744. else:
  745. postfix = os.path.splitext(impth)[-1]
  746. if postfix in img_postfixs: img_urls=[ impth ]
  747. if postfix in vides_postfixs: video_urls = [impth ]
  748. parIn = {'obbModelPar':obbModelPar,'modelPar':{'obbmodel': model},'sort_tracker':sort_tracker,'outpth':par['outpth'],'trackPar':trackPar,'drawPar':par['drawPar']}
  749. par['drawPar']['label_array']=label_arraylist
  750. for img_url in img_urls:
  751. #print(img_url)
  752. ori_image=cv2.imread(img_url)
  753. #ori_image_list,infos = OBB_infer(model,ori_image,obbModelPar)
  754. ori_image_list,infos = OBB_tracker_batch([ori_image],[0],parIn['modelPar'],parIn['obbModelPar'],None,parIn['trackPar'],None)
  755. ori_image_list[1] = draw_obb(ori_image_list[2] ,ori_image_list[1],par['drawPar'])
  756. imgName = os.path.basename(img_url)
  757. saveFile = os.path.join(par['outpth'], imgName)
  758. ret=cv2.imwrite(saveFile, ori_image_list[1])
  759. if not ret:
  760. print(saveFile, ' not created ')
  761. print( os.path.basename(img_url),':',infos,ori_image_list[2])
  762. ###处理视频
  763. for video_url in video_urls:
  764. process_video(video_url, parIn ,mode='obbTrack')
  765. def crowd_demo(opt):
  766. if opt['business']=='crowdCounting':
  767. from crowd import crowdModel as Model
  768. par={
  769. 'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225],'threshold':0.5,
  770. 'input_profile_shapes':[(1,3,256,256),(1,3,1024,1024),(1,3,2048,2048)],
  771. 'modelPar':{'backbone':'vgg16_bn', 'gpu_id':0,'anchorFlag':False, 'width':None,'height':None ,'line':2, 'row':2},
  772. 'weights':"../weights/%s/AIlib2/%s/crowdCounting_%s_dynamic.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
  773. 'testImgPath':'images/%s'%(opt['business'] ),###测试图像的位置
  774. 'testOutPath':'images/results/',###输出测试图像位置
  775. }
  776. #weights='weights/best_mae.pth'
  777. cmodel = Model(par['weights'],par)
  778. img_path = par['testImgPath']
  779. File = os.listdir(img_path)
  780. targetList = []
  781. for file in File[0:]:
  782. COORlist = []
  783. imgPath = img_path + os.sep + file
  784. img_raw = cv2.cvtColor(cv2.imread(imgPath),cv2.COLOR_BGR2RGB)
  785. # cmodel.eval---
  786. # 输入读取的RGB数组
  787. # 输出:list,0--原图,1-人头坐标list,2-对接OBB的格式数据,其中4个坐标均相同,2-格式如下:
  788. # [ [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score, cls ], [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score ,cls ],........ ]
  789. prets, infos = cmodel.eval(img_raw)
  790. print(file,infos,' 人数:',len(prets[1]))
  791. img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR)
  792. # 打印预测图像中人头的个数
  793. for p in prets[1]:
  794. img_to_draw = cv2.circle(img_to_draw, (int(p[0]), int(p[1])), 2, (0, 255, 0), -1)
  795. COORlist.append((int(p[0]), int(p[1])))
  796. # 将各测试图像中的人头坐标存储在targetList中, 格式:[[(x1, y1),(x2, y2),...], [(X1, Y1),(X2, Y2),..], ...]
  797. targetList.append(COORlist)
  798. #time.sleep(2)
  799. # 保存预测图片
  800. cv2.imwrite(os.path.join(par['testOutPath'], file), img_to_draw)
  801. if __name__=="__main__":
  802. #jkm_demo()
  803. businessAll=['river', 'river2','highWay2','noParking','drowning','forest2','vehicle','pedestrian','smogfire' , 'AnglerSwimmer','channelEmergency', 'countryRoad','cityMangement','ship2','cityMangement2','cityRoad','illParking',"crowdCounting"]
  804. businessAll = ['crowdCounting']
  805. for busi in businessAll:
  806. print('-'*40,'beg to test:',busi,'-'*40)
  807. opt={'gpu':'2080Ti','business':busi}
  808. if busi in ['ship2']:
  809. OBB_track_demo(opt)
  810. elif opt['business'] in ['crowdCounting'] :
  811. crowd_demo(opt)
  812. else:
  813. #if opt['business'] in ['river','highWay2','noParking','drowning','']:
  814. det_track_demo(opt )