Test.py

import sys, yaml
from easydict import EasyDict as edict
from concurrent.futures import ThreadPoolExecutor
sys.path.extend(['..', '../AIlib2'])
from AI import AI_process, AI_process_forest, get_postProcess_para, get_postProcess_para_dic, ocr_process, AI_det_track, AI_det_track_batch
import cv2, os, time
from segutils.segmodel import SegModel
from segutils.trafficUtils import tracfficAccidentMixFunction
from models.experimental import attempt_load
from utils.torch_utils import select_device
from utilsK.queRiver import get_labelnames, get_label_arrays, save_problem_images, riverDetSegMixProcess
from ocrUtils.ocrUtils import CTCLabelConverter, AlignCollate
from trackUtils.sort import Sort, track_draw_boxAndTrace, track_draw_trace_boxes, moving_average_wang, drawBoxTraceSimplied
from trackUtils.sort_obb import OBB_Sort, obbTohbb, track_draw_all_boxes, track_draw_trace
from obbUtils.shipUtils import OBB_infer, OBB_tracker, draw_obb, OBB_tracker_batch
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from obbUtils.load_obb_model import load_model_decoder_OBB
import numpy as np
import torch, glob
import tensorrt as trt
from utilsK.masterUtils import get_needed_objectsIndex
from copy import deepcopy
from scipy import interpolate
from utilsK.drownUtils import mixDrowing_water_postprocess
#import warnings
#warnings.filterwarnings("error")
def view_bar(num, total, time1, prefix='prefix'):
    rate = num / total
    time_n = time.time()
    rate_num = int(rate * 30)
    rate_nums = np.round(rate * 100)
    r = '\r %s %d / %d [%s%s] %.2f s' % (prefix, num, total, ">" * rate_num, " " * (30 - rate_num), time_n - time1)
    sys.stdout.write(r)
    sys.stdout.flush()
'''
multi-threading
'''
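# Illustration only (not part of the original script): the ThreadPoolExecutor imported above could
# dispatch process_v1 over a list of pre-built frame tuples, roughly as sketched below; the name
# `frames` and the worker count are assumptions, not values taken from this file.
# with ThreadPoolExecutor(max_workers=2) as executor:
#     for status in executor.map(process_v1, frames):
#         print(status)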
def process_v1(frame):
    #try:
    print('demo.py begin to process:', frame[8])
    time00 = time.time()
    H, W, C = frame[0][0].shape
    p_result, timeOut = AI_process(frame[0], frame[1], frame[2], frame[3], frame[4], frame[5], objectPar=frame[6], font=frame[7], segPar=frame[9], mode=frame[10], postPar=frame[11])
    time11 = time.time()
    image_array = p_result[1]
    cv2.imwrite(os.path.join('images/results/', frame[8]), image_array)
    bname = frame[8].split('.')[0]
    if len(p_result) == 5:
        image_mask = p_result[4]
        cv2.imwrite(os.path.join('images/results/', bname + '_mask.png'), (image_mask).astype(np.uint8))
    boxes = p_result[2]
    with open(os.path.join('images/results/', bname + '.txt'), 'w') as fp:
        for box in boxes:
            box_str = [str(x) for x in box]
            out_str = ','.join(box_str) + '\n'
            fp.write(out_str)
    time22 = time.time()
    print('%s,%d*%d,AI-process: %.1f,image save:%.1f , %s' % (frame[8], H, W, (time11 - time00) * 1000.0, (time22 - time11) * 1000.0, timeOut), boxes)
    return 'success'
    #except Exception as e:
    #    return 'failed:' + str(e)
def process_video(video, par0, mode='detSeg'):
    cap = cv2.VideoCapture(video)
    if not cap.isOpened():
        print('#####error url:', video)
        return False
    bname = os.path.basename(video).split('.')[0]
    fps = int(cap.get(cv2.CAP_PROP_FPS) + 0.5)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
    framecnt = int(cap.get(7) + 0.5)
    save_path_AI = os.path.join(par0['outpth'], os.path.basename(video))
    problem_image_dir = os.path.join(par0['outpth'], 'probleImages')
    os.makedirs(problem_image_dir, exist_ok=True)
    vid_writer_AI = cv2.VideoWriter(save_path_AI, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    num = 0
    iframe = 0; post_results = []; fpsample = 30 * 10
    imgarray_list = []; iframe_list = []
    patch_cnt = par0['trackPar']['patchCnt']
    ## windowsize: length of the smoothing window applied to the per-frame interpolated results; tracking is run every det_cnt frames.
    trackPar = {'det_cnt': 10, 'windowsize': 29}
    ## track_det_result_update = np.empty((0,8))  ### results of each 100-frame batch go into track_det_result_update; only tracker ids present in the current 100 frames are kept.
    while cap.isOpened():
        ret, imgarray = cap.read()  # read one frame
        iframe += 1
        if not ret: break
        if mode == 'detSeg':
            p_result, timeOut = AI_process([imgarray], par0['model'], par0['segmodel'], par0['names'], par0['label_arraylist'], par0['rainbows'], objectPar=par0['objectPar'], font=par0['digitFont'], segPar=par0['segPar'])
        elif mode == 'track':
            #sampleCount=10
            imgarray_list.append(imgarray)
            iframe_list.append(iframe)
            if iframe % patch_cnt == 0:
                time_patch0 = time.time()
                retResults, timeInfos = AI_det_track_batch(imgarray_list, iframe_list, par0['modelPar'], par0['processPar'], par0['sort_tracker'], par0['trackPar'], segPar=par0['segPar'])
                #print('###line111:',retResults[2])
                ### must be stored as a 2-D list: each inner list holds one frame's detection results.
                ### track_det_result format: x1, y1, x2, y2, conf, cls, iframe, trackId
                time_patch2 = time.time()
                frame_min = iframe_list[0]; frame_max = iframe_list[-1]
                for iiframe in range(frame_min, frame_max + 1):
                    img_draw = imgarray_list[iiframe - frame_min]
                    img_draw = drawBoxTraceSimplied(retResults[1], iiframe, img_draw, rainbows=par0['drawPar']['rainbows'], boxFlag=True, traceFlag=True, names=par0['drawPar']['names'])
                    ret = vid_writer_AI.write(img_draw)
                    view_bar(iiframe, framecnt, time.time(), prefix=os.path.basename(video))
                imgarray_list = []; iframe_list = []
        elif mode == 'obbTrack':
            imgarray_list.append(imgarray)
            iframe_list.append(iframe)
            if iframe % patch_cnt == 0:
                time_patch0 = time.time()
                track_det_results, timeInfos = OBB_tracker_batch(imgarray_list, iframe_list, par0['modelPar'], par0['obbModelPar'], par0['sort_tracker'], par0['trackPar'], segPar=None)
                print(timeInfos)
                # draw the results
                track_det_np = track_det_results[1]
                frame_min = iframe_list[0]; frame_max = iframe_list[-1]
                for iiframe in range(frame_min, frame_max + 1):
                    img_draw = imgarray_list[iiframe - frame_min]
                    if len(track_det_results[2][iiframe - frame_min]) > 0:
                        img_draw = draw_obb(track_det_results[2][iiframe - frame_min], img_draw, par0['drawPar'])
                    if True:
                        frameIdex = 12; trackIdex = 13
                        boxes_oneFrame = track_det_np[track_det_np[:, frameIdex] == iiframe]
                        ### draw the trajectories on this frame
                        track_ids = boxes_oneFrame[:, trackIdex].tolist()
                        boxes_before_oneFrame = track_det_np[track_det_np[:, frameIdex] <= iiframe]
                        for trackId in track_ids:
                            boxes_before_oneFrame_oneId = boxes_before_oneFrame[boxes_before_oneFrame[:, trackIdex] == trackId]
                            xcs = boxes_before_oneFrame_oneId[:, 8]
                            ycs = boxes_before_oneFrame_oneId[:, 9]
                            [cv2.line(img_draw, (int(xcs[i]), int(ycs[i])),
                                      (int(xcs[i + 1]), int(ycs[i + 1])), (255, 0, 0), thickness=2)
                             for i, _ in enumerate(xcs) if i < len(xcs) - 1]
                    ret = vid_writer_AI.write(img_draw)
                #sys.exit(0)
                #print('vide writer ret:',ret)
                imgarray_list = []; iframe_list = []
                view_bar(iframe, framecnt, time.time(), prefix=os.path.basename(video))
        else:
            p_result, timeOut = AI_process_forest([imgarray], par0['model'], par0['segmodel'], par0['names'], par0['label_arraylist'], par0['rainbows'], par0['half'], par0['device'], par0['conf_thres'], par0['iou_thres'], par0['allowedList'], font=par0['digitFont'], trtFlag_det=par0['trtFlag_det'])
        if mode not in ['track', 'obbTrack']:
            image_array = p_result[1]; num += 1
            ret = vid_writer_AI.write(image_array)
            view_bar(num, framecnt, time.time(), prefix=os.path.basename(video))
            ## every fpsample frames, save images if any problems were found
            if (iframe % fpsample == 0) and (len(post_results) > 0):
                parImage = save_problem_images(post_results, iframe, par0['names'], streamName=bname, outImaDir=problem_image_dir, imageTxtFile=False)
                post_results = []
            if len(p_result[2]) > 0:
                post_results.append(p_result)
    vid_writer_AI.release()
def det_track_demo(opt, videopaths):
    '''
    Tracking parameter description:
      'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}
      sort_max_age    -- maximum number of frames a target may be missed before its track is broken; after that it is treated as a new target.
      sort_min_hits   -- number of consecutive detections required before a target is confirmed as a track.
      sort_iou_thresh -- IOU threshold used when matching detections to existing tracks.
      det_cnt         -- run detection/tracking once every det_cnt frames, default 10.
      windowsize      -- trajectory smoothing window length; must be odd, default 29.
      patchCnt        -- number of frames fed to the tracker per batch; should not be fewer than 100.
    '''
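    # Illustration only, mirroring what this function does further below: the three sort_* values in
    # trackPar are the ones handed to the SORT tracker,
    #     sort_tracker = Sort(max_age=trackPar['sort_max_age'],
    #                         min_hits=trackPar['sort_min_hits'],
    #                         iou_threshold=trackPar['sort_iou_thresh'])
    # while det_cnt, windowsize and patchCnt control detection frequency, trajectory smoothing and
    # batch size as described in the docstring above.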
    ''' The following are tracking models based on detection + segmentation; segmentation is used to correct the detection results. '''
    #### tracking-model parameters for river inspection
    if opt['business'] == 'river' or opt['business'] == 'river2':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % (opt['business']),  ### detection class-name table
            'gpuname': '2080Ti',  ### GPU name
            'max_workers': 1,  ### number of parallel threads
            'half': True,
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'seg_nclass': 2,  ### number of segmentation classes, default 2
            'segRegionCnt': 0,  ### number of contour regions kept from the segmentation result
            'segPar': {'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'numpy': False, 'RGB_convert_first': True,  # segmentation preprocessing parameters
                       'mixFunction': {'function': riverDetSegMixProcess, 'pars': {'slopeIndex': [1, 3, 4, 7], 'riverIou': 0.1}}  # function mixing segmentation and detection results
                       },
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'Segweights': "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/%s/para.json' % (opt['business']),  ### post-processing parameter file
            'txtFontSize': 80,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            #'testImgPath': 'images/videos/river',  ### test image directory
            'testImgPath': 'images/tt',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    if opt['business'] == 'highWay2':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % (opt['business']),  ### detection class-name table
            'half': True,
            'gpuname': '3090',  ### GPU name
            'max_workers': 1,  ### number of parallel threads
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            #'Detweights': "../AIlib2/weights/conf/highWay2/yolov5.pt",
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 5, 6, 7, 8, 9]],  ### which detection classes are displayed/output
            'seg_nclass': 3,  ### number of segmentation classes
            'segRegionCnt': 2,  ### number of contour regions kept from the segmentation result
            'segPar': {'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'predResize': True, 'numpy': False, 'RGB_convert_first': True,  ### segmentation preprocessing parameters
                       'mixFunction': {'function': tracfficAccidentMixFunction,
                                       'pars': {'RoadArea': 16000, 'vehicleArea': 10, 'roadVehicleAngle': 15, 'speedRoadVehicleAngleMax': 75, 'radius': 50, 'roundness': 1.0, 'cls': 9, 'vehicleFactor': 0.1, 'confThres': 0.25, 'roadIou': 0.6, 'vehicleFlag': False, 'distanceFlag': False}
                                       }
                       },
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'mode': 'highWay3.0',
            'Segweights': "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/%s/para.json' % (opt['business']),  ### post-processing parameter file
            'txtFontSize': 20,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 0.5, 'waterLineColor': (0, 255, 255), 'segLineShow': True, 'waterLineWidth': 2},  ### box/line display settings
            #'testImgPath': 'images/trafficAccident/8.png',  ### test image directory
            'testImgPath': 'images/noParking/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
        par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
    if opt['business'] == 'noParking':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % (opt['business']),  ### detection class-name table
            'half': True,
            'gpuname': '3090',  ### GPU name
            'max_workers': 1,  ### number of parallel threads
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            #'Detweights': "../AIlib2/weights/conf/highWay2/yolov5.pt",
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 5, 6, 7, 8, 9]],  ### which detection classes are displayed/output
            'seg_nclass': 4,  ### number of segmentation classes
            'segRegionCnt': 2,  ### number of contour regions kept from the segmentation result
            'segPar': {'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'predResize': True, 'numpy': False, 'RGB_convert_first': True,  ### segmentation preprocessing parameters
                       'mixFunction': {'function': mixNoParking_road_postprocess,
                                       'pars':
                                           #{'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5, 'RoadArea': 16000}
                                           {'RoadArea': 16000, 'roadVehicleAngle': 15, 'radius': 50, 'distanceFlag': False, 'vehicleFlag': False}
                                       }
                       },
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'mode': 'highWay3.0',
            'Segweights': "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/%s/para.json' % ('highWay2'),  ### post-processing parameter file
            'txtFontSize': 20,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'segLineShow': True, 'waterLineWidth': 2},  ### box/line display settings
            'testImgPath': 'images/noParking/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
        par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
    if opt['business'] == 'drowning':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % (opt['business']),  ### detection class-name table
            'half': True,
            'gpuname': '3090',  ### GPU name
            'max_workers': 1,  ### number of parallel threads
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            #'Detweights': "../AIlib2/weights/conf/highWay2/yolov5.pt",
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 5, 6, 7, 8, 9]],  ### which detection classes are displayed/output
            'seg_nclass': 2,  ### number of segmentation classes, default 2
            'segRegionCnt': 2,  ### number of contour regions kept from the segmentation result
            'segPar': {'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'predResize': True, 'numpy': False, 'RGB_convert_first': True,  ### segmentation preprocessing parameters
                       'mixFunction': {'function': mixDrowing_water_postprocess,
                                       'pars': {}
                                       }
                       },
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'Segweights': "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/%s/para.json' % ('highWay2'),  ### post-processing parameter file
            'txtFontSize': 20,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'segLineShow': True, 'waterLineWidth': 2},  ### box/line display settings
            'testImgPath': 'images/drowning/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
        par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
    ''' The following are tracking models based on detection only; there is no segmentation. '''
    if opt['business'] == 'forest2':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/forest2/labelnames.json",  ### detection class-name table
            'gpuname': opt['gpu'],  ### GPU name
            'max_workers': 1,  ### number of parallel threads
            'half': True,
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            #'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in []],
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### number of segmentation classes, default 2
            'segRegionCnt': 0,  ### number of contour regions kept from the segmentation result
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/forest/para.json',  ### post-processing parameter file
            'txtFontSize': 80,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': '../AIdemo2/images/forest2/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    ### vehicle-inspection parameters
    if opt['business'] == 'vehicle':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/vehicle/labelnames.json",  ### detection class-name table
            'gpuname': '2080T',  ### GPU name
            'half': True,
            'max_workers': 1,  ### number of parallel threads
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### number of segmentation classes, default 2
            'segRegionCnt': 0,  ### number of contour regions kept from the segmentation result
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/vehicle/para.json',  ### post-processing parameter file
            'txtFontSize': 40,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': 'images/videos/vehicle/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    ### pedestrian detection model
    if opt['business'] == 'pedestrian':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/pedestrian/labelnames.json",  ### detection class-name table
            'gpuname': '2080T',  ### GPU name
            'half': True,
            'max_workers': 1,  ### number of parallel threads
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### number of segmentation classes, default 2
            'segRegionCnt': 0,  ### number of contour regions kept from the segmentation result
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/pedestrian/para.json',  ### post-processing parameter file
            'txtFontSize': 40,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': '../AIdemo2/images/pedestrian/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    if opt['business'] == 'smogfire':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/smogfire/labelnames.json",  ### detection class-name table
            'gpuname': '2080T',  ### GPU name
            'half': True,
            'max_workers': 1,  ### number of parallel threads
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### no segmentation model, unused here
            'segRegionCnt': 0,  ### no segmentation model, unused here
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/smogfire/para.json',  ### post-processing parameter file
            'txtFontSize': 40,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': '../AIdemo2/images/smogfire/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    ### angler and swimmer detection
    if opt['business'] == 'AnglerSwimmer':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/AnglerSwimmer/labelnames.json",  ### detection class-name table
            'gpuname': '2080T',  ### GPU name
            'half': True,
            'max_workers': 1,  ### number of parallel threads
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### no segmentation model, unused here
            'segRegionCnt': 0,  ### no segmentation model, unused here
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/AnglerSwimmer/para.json',  ### post-processing parameter file
            'txtFontSize': 40,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': '../AIdemo2/images/AnglerSwimmer/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    ### channel emergency: detect people fallen into the water, channelEmergency
    if opt['business'] == 'channelEmergency':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/channelEmergency/labelnames.json",  ### detection class-name table
            'gpuname': '2080T',  ### GPU name
            'half': True,
            'max_workers': 1,  ### number of parallel threads
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            #'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in []],  ### which detection classes are displayed/output
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### no segmentation model, unused here
            'segRegionCnt': 0,  ### no segmentation model, unused here
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/channelEmergency/para.json',  ### post-processing parameter file
            'txtFontSize': 40,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': '../AIdemo2/images/channelEmergency/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    ### illegal planting along country roads
    if opt['business'] == 'countryRoad':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/countryRoad/labelnames.json",  ### detection class-name table
            'gpuname': '2080T',  ### GPU name
            'half': True,
            'max_workers': 1,  ### number of parallel threads
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### no segmentation model, unused here
            'segRegionCnt': 0,  ### no segmentation model, unused here
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/countryRoad/para.json',  ### post-processing parameter file
            'txtFontSize': 40,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': '../AIdemo2/images/countryRoad/',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    ### city-management project: detect urban garbage and vehicles
    if opt['business'] == 'cityMangement':
        par = {
            'device': '0',  ### GPU id; TRT models only support '0' (single GPU)
            'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % (opt['business']),  ### detection class-name table
            'gpuname': '2080Ti',  ### GPU name
            'half': True,
            'max_workers': 1,  ### number of parallel threads
            'trtFlag_det': True,  ### whether the detection model uses TRT
            'trtFlag_seg': False,  ### whether the segmentation model uses TRT
            'Detweights': "../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine" % (opt['gpu'], opt['business'], opt['gpu']),  ### detection model path
            'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0, 1, 2, 3, 4, 5, 6]],  ### which detection classes are displayed/output
            'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
            'seg_nclass': 2,  ### no segmentation model, unused here
            'segRegionCnt': 0,  ### no segmentation model, unused here
            'segPar': None,  ### segmentation preprocessing parameters
            'Segweights': None,  ### segmentation model weights
            'postFile': '../AIlib2/weights/conf/%s/para.json' % (opt['business']),  ### post-processing parameter file
            'txtFontSize': 40,  ### text font size
            'digitFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'fontSize': 1.0, 'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},  ### box/line display settings
            'testImgPath': 'images/cityMangement',  ### test image directory
            'testOutPath': 'images/results/',  ### output directory for result images
        }
    par['trtFlag_det'] = par['Detweights'].endswith('.engine')
    if par['Segweights']:
        par['segPar']['trtFlag_seg'] = par['Segweights'].endswith('.engine')
    ## the forest/road models are selected via 'business' (['forest','road'])
    ## preset parameters
    gpuname = par['gpuname']  # required when using TRT; must be "3090" or "2080Ti"
    device_ = par['device']  ## device selection: 'cpu', '0', '1'
    device = select_device(device_)
    half = device.type != 'cpu'  # half precision only supported on CUDA
    trtFlag_det = par['trtFlag_det']  ### whether to use TRT acceleration for detection
    ## the parameters below cannot be changed at present
    imageW = 1080  #### road model
    digitFont = par['digitFont']
    #### load the detection model
    if trtFlag_det:
        Detweights = par['Detweights']
        logger = trt.Logger(trt.Logger.ERROR)
        with open(Detweights, "rb") as f, trt.Runtime(logger) as runtime:
            model = runtime.deserialize_cuda_engine(f.read())  # read the local TRT file and return an ICudaEngine object
        print('####load TRT model :%s' % (Detweights))
    else:
        Detweights = par['Detweights']
        model = attempt_load(Detweights, map_location=device)  # load FP32 model
        if half: model.half()
    #### load the segmentation model
    seg_nclass = par['seg_nclass']
    segPar = par['segPar']
    if par['Segweights']:
        if par['segPar']['trtFlag_seg']:
            Segweights = par['Segweights']
            logger = trt.Logger(trt.Logger.ERROR)
            with open(Segweights, "rb") as f, trt.Runtime(logger) as runtime:
                segmodel = runtime.deserialize_cuda_engine(f.read())  # read the local TRT file and return an ICudaEngine object
            print('############load seg model trt success: ', Segweights)
        else:
            Segweights = par['Segweights']
            segmodel = SegModel(nclass=seg_nclass, weights=Segweights, device=device)
            print('############load seg model pth success:', Segweights)
    else:
        segmodel = None
    trackPar = par['trackPar']
    sort_tracker = Sort(max_age=trackPar['sort_max_age'],
                        min_hits=trackPar['sort_min_hits'],
                        iou_threshold=trackPar['sort_iou_thresh'])
    labelnames = par['labelnames']
    postFile = par['postFile']
    print(Detweights, labelnames)
    conf_thres, iou_thres, classes, rainbows = get_postProcess_para(postFile)
    detPostPar = get_postProcess_para_dic(postFile)
    conf_thres, iou_thres, classes, rainbows = detPostPar["conf_thres"], detPostPar["iou_thres"], detPostPar["classes"], detPostPar["rainbows"]
    if 'ovlap_thres_crossCategory' in detPostPar.keys(): iou2nd = detPostPar['ovlap_thres_crossCategory']
    else: iou2nd = None
    #### model-selection parameters:
    mode_paras = par['detModelpara']
    allowedList, allowedList_string = get_needed_objectsIndex(mode_paras)
    #slopeIndex = par['slopeIndex']
    ## only the detection model is loaded; prepare the label glyphs for display
    names = get_labelnames(labelnames)
    #imageW = 4915  ### default is 1920; 4920 for high-resolution forest-inspection images
    outfontsize = int(imageW / 1920 * 40)
    label_arraylist = get_label_arrays(names, rainbows, outfontsize=par['txtFontSize'], fontpath="../AIlib2/conf/platech.ttf")
    ## image and video tests
    outpth = par['testOutPath']
    impth = par['testImgPath']
    imgpaths = []  ### collect all images in the folder
    ### videos to process come from the videopaths argument (plus any found in impth below)
    img_postfixs = ['.jpg', '.JPG', '.PNG', '.png']
    vides_postfixs = ['.MP4', '.mp4', '.avi']
    if os.path.isdir(impth):
        for postfix in img_postfixs:
            imgpaths.extend(glob.glob('%s/*%s' % (impth, postfix)))
        for postfix in ['.MP4', '.mp4', '.avi']:
            videopaths.extend(glob.glob('%s/*%s' % (impth, postfix)))
    else:
        postfix = os.path.splitext(impth)[-1]
        if postfix in img_postfixs: imgpaths = [impth]
        if postfix in vides_postfixs: videopaths = [impth]
    imgpaths.sort()
    modelPar = {'det_Model': model, 'seg_Model': segmodel}
    processPar = {'half': par['half'], 'device': device, 'conf_thres': conf_thres, 'iou_thres': iou_thres, 'trtFlag_det': trtFlag_det, 'iou2nd': iou2nd}
    drawPar = {'names': names, 'label_arraylist': label_arraylist, 'rainbows': rainbows, 'font': par['digitFont'], 'allowedList': allowedList}
    for i in range(len(imgpaths)):
        #for i in range(2):
        #imgpath = os.path.join(impth, folders[i])
        imgpath = imgpaths[i]
        bname = os.path.basename(imgpath)
        im0s = [cv2.imread(imgpath)]
        time00 = time.time()
        retResults, timeOut = AI_det_track_batch(im0s, [i], modelPar, processPar, sort_tracker, trackPar, segPar)
        #print('###line627:',retResults[2])
        #retResults, timeInfos = AI_det_track_batch(imgarray_list, iframe_list, par0['modelPar'], par0['processPar'], par0['sort_tracker'], par0['trackPar'], segPar=par0['segPar'])
        if len(retResults[1]) > 0:
            retResults[0][0] = drawBoxTraceSimplied(retResults[1], i, retResults[0][0], rainbows=rainbows, boxFlag=True, traceFlag=False, names=drawPar['names'])
        time11 = time.time()
        image_array = retResults[0][0]
        '''
        Return value retResults[2] is a list: each element is itself a list holding one frame's
        detection results; each of those results consists of several lists, one per box, in the
        format [cls, x0, y0, x1, y1, conf, iframe, trackId]
        -- e.g. retResults[2][j][k] is the k-th box of the j-th frame.
        '''
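        # Illustration only (not executed by the original script): one way to walk the structure
        # described above, assuming the [cls, x0, y0, x1, y1, conf, iframe, trackId] box layout:
        # for j, frame_boxes in enumerate(retResults[2]):
        #     for k, box in enumerate(frame_boxes):
        #         cls_id, x0, y0, x1, y1, conf, ifrm, track_id = box
        #         print('frame %d, box %d: cls=%s conf=%.2f track=%s' % (j, k, cls_id, conf, track_id))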
        cv2.imwrite(os.path.join(outpth, bname), image_array)
        print('----image:%s, process:%s ( %s ),save:%s' % (bname, (time11 - time00) * 1000, timeOut, (time.time() - time11) * 1000))
    ## process videos
    print('##begin to process videos, total %d videos' % (len(videopaths)))
    for i, video in enumerate(videopaths):
        print('process video%d :%s ' % (i, video))
        par0 = {'modelPar': modelPar, 'processPar': processPar, 'drawPar': drawPar, 'outpth': par['testOutPath'], 'sort_tracker': sort_tracker, 'trackPar': trackPar, 'segPar': segPar}
        process_video(video, par0, mode='track')
def OCR_demo2(opt):
    from ocrUtils2 import crnn_model
    from ocrUtils2.ocrUtils import get_cfg, recognition_ocr, strLabelConverter
    if opt['business'] == 'ocr2':
        par = {
            'image_dir': 'images/ocr_en',
            'outtxt': 'images/results',
            'weights': '../AIlib2/weights/conf/ocr2/crnn_448X32.pth',
            #'weights': '../weights/2080Ti/AIlib2/ocr2/crnn_2080Ti_fp16_448X32.engine',
            'device': 'cuda:0',
            'cfg': '../AIlib2/weights/conf/ocr2/360CC_config.yaml',
            'char_file': '../AIlib2/weights/conf/ocr2/chars.txt',
            'imgH': 32,
            'imgW': 448,
            'workers': 1
        }
    image_dir = par['image_dir']
    outtxt = par['outtxt']
    workers = par['workers']
    weights = par['weights']
    device = par['device']
    char_file = par['char_file']
    imgH = par['imgH']
    imgW = par['imgW']
    cfg = par['cfg']
    config = get_cfg(cfg, char_file)
    par['contextFlag'] = False
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    if weights.endswith('.pth'):
        model = crnn_model.get_crnn(config, weights=weights).to(device)
        par['model_mode'] = 'pth'
    else:
        logger = trt.Logger(trt.Logger.ERROR)
        with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
            model = runtime.deserialize_cuda_engine(f.read())  # read the local TRT file and return an ICudaEngine object
        print('#####load TRT file:', weights, 'success #####')
        context = model.create_execution_context()
        par['model_mode'] = 'trt'; par['contextFlag'] = context
    converter = strLabelConverter(config.DATASET.ALPHABETS)
    img_urls = glob.glob('%s/*.jpg' % (image_dir))
    img_urls.extend(glob.glob('%s/*.png' % (image_dir)))
    cnt = len(img_urls)
    print('%s has %d images' % (image_dir, len(img_urls)))
    # prepare the data
    parList = []
    for i in range(cnt):
        img_patch = cv2.imread(img_urls[i], cv2.IMREAD_GRAYSCALE)
        started = time.time()
        img = cv2.imread(img_urls[i])
        sim_pred = recognition_ocr(config, img, model, converter, device, par=par)
        finished = time.time()
        print('{0}: elapsed time: {1} prd:{2} '.format(os.path.basename(img_urls[i]), finished - started, sim_pred))
def OBB_track_demo(opt):
    ### oriented-bounding-box (OBB) ship detection and tracking
    '''
    par={
        'model_size':(608,608),  # width,height
        'K':100,  # maximum number of objects
        'conf_thresh':0.18,  ## confidence threshold, 0.1 for general evaluation
        'device':"cuda:0",
        'down_ratio':4,'num_classes':15,
        #'weights':'../AIlib2/weights/conf/ship2/obb_608X608.engine',
        'weights':'../weights/%s/AIlib2/%s/obb_608X608_%s_fp16.engine'%(opt['gpu'],opt['business'],opt['gpu']),
        'dataset':'dota',
        'test_dir': '/mnt/thsw2/DSP2/videos/obbShips',
        'outpth': 'images/results',
        'half': False,
        'mean':(0.5, 0.5, 0.5),
        'std':(1, 1, 1),
        'model_size':(608,608),  ## width,height
        'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
        'heads': {'hm': None,'wh': 10,'reg': 2,'cls_theta': 1},
        'decoder':None,
        'test_flag':True,
        'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business']),  ### post-processing parameter file
        'drawBox':True,  ##### whether to draw boxes
        'digitWordFont': {'line_thickness':2,'boxLine_thickness':1,'wordSize':40,'fontSize':1.0,'label_location':'leftTop'},
        'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']),  ### detection class-name table
    }
    '''
    par = {
        'obbModelPar': {
            'model_size': (608, 608), 'K': 100, 'conf_thresh': 0.3, 'down_ratio': 4, 'num_classes': 15, 'dataset': 'dota',
            'heads': {'hm': None, 'wh': 10, 'reg': 2, 'cls_theta': 1},
            'mean': (0.5, 0.5, 0.5), 'std': (1, 1, 1), 'half': False, 'decoder': None,
            'weights': '../weights/%s/AIlib2/%s/obb_608X608_%s_fp16.engine' % (opt['gpu'], opt['business'], opt['gpu']),
        },
        'outpth': 'images/results',
        'trackPar': {'sort_max_age': 2, 'sort_min_hits': 3, 'sort_iou_thresh': 0.2, 'det_cnt': 10, 'windowsize': 29, 'patchCnt': 100},
        'device': "cuda:0",
        #'test_dir': '/mnt/thsw2/DSP2/videos/obbShips/DJI_20230208110806_0001_W_6M.MP4',
        'test_dir': '/mnt/thsw2/DSP2/videos/obbShips/freighter2.mp4',
        'test_flag': True,
        'postFile': '../AIlib2/weights/conf/%s/para.json' % (opt['business']),  ### post-processing parameter file
        'drawBox': True,  ##### whether to draw boxes
        'drawPar': {'digitWordFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'wordSize': 40, 'fontSize': 1.0, 'label_location': 'leftTop'}},
        'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % (opt['business']),  ### detection class-name table
    }
    #par['model_size'],par['mean'],par['std'],par['half'],par['saveType'],par['heads'],par['labelnames'],par['decoder'],par['down_ratio'],par['drawBox']
    #par['rainbows'],par['label_array'],par['digitWordFont']
    obbModelPar = par['obbModelPar']
    #### load the model
    model, decoder2 = load_model_decoder_OBB(obbModelPar)
    obbModelPar['decoder'] = decoder2
    names = get_labelnames(par['labelnames']); obbModelPar['labelnames'] = names
    _, _, _, rainbows = get_postProcess_para(par['postFile']); par['drawPar']['rainbows'] = rainbows
    label_arraylist = get_label_arrays(names, rainbows, outfontsize=par['drawPar']['digitWordFont']['wordSize'], fontpath="../AIlib2/conf/platech.ttf")
    #par['label_array']=label_arraylist
    trackPar = par['trackPar']
    sort_tracker = OBB_Sort(max_age=trackPar['sort_max_age'],
                            min_hits=trackPar['sort_min_hits'],
                            iou_threshold=trackPar['sort_iou_thresh'])
    ## image and video tests
    impth = par['test_dir']
    img_urls = []  ### collect all images in the folder
    video_urls = []  ### collect all videos in the folder
    img_postfixs = ['.jpg', '.JPG', '.PNG', '.png']
    vides_postfixs = ['.MP4', '.mp4', '.avi']
    if os.path.isdir(impth):
        for postfix in img_postfixs:
            img_urls.extend(glob.glob('%s/*%s' % (impth, postfix)))
        for postfix in ['.MP4', '.mp4', '.avi']:
            video_urls.extend(glob.glob('%s/*%s' % (impth, postfix)))
    else:
        postfix = os.path.splitext(impth)[-1]
        if postfix in img_postfixs: img_urls = [impth]
        if postfix in vides_postfixs: video_urls = [impth]
    parIn = {'obbModelPar': obbModelPar, 'modelPar': {'obbmodel': model}, 'sort_tracker': sort_tracker, 'outpth': par['outpth'], 'trackPar': trackPar, 'drawPar': par['drawPar']}
    par['drawPar']['label_array'] = label_arraylist
    for img_url in img_urls:
        #print(img_url)
        ori_image = cv2.imread(img_url)
        #ori_image_list, infos = OBB_infer(model, ori_image, obbModelPar)
        ori_image_list, infos = OBB_tracker_batch([ori_image], [0], parIn['modelPar'], parIn['obbModelPar'], None, parIn['trackPar'], None)
        ori_image_list[1] = draw_obb(ori_image_list[2], ori_image_list[1], par['drawPar'])
        imgName = os.path.basename(img_url)
        saveFile = os.path.join(par['outpth'], imgName)
        ret = cv2.imwrite(saveFile, ori_image_list[1])
        if not ret:
            print(saveFile, ' not created ')
        print(os.path.basename(img_url), ':', infos, ori_image_list[2])
    ### process the videos
    for video_url in video_urls:
        process_video(video_url, parIn, mode='obbTrack')
if __name__ == "__main__":
    #jkm_demo()
    #businessAll = ['river', 'river2', 'highWay2', 'noParking', 'drowning', 'forest2', 'vehicle', 'pedestrian', 'smogfire', 'AnglerSwimmer', 'channelEmergency', 'countryRoad', 'cityMangement', 'ship2']
    businessAll = ['river2']
    videopaths = ['/home/th/tuo_heng/dev/DJI_20211229100908_0002_S.mp4']
    for busi in businessAll:
        print('-' * 40, 'beg to test:', busi, '-' * 40)
        opt = {'gpu': '2080Ti', 'business': busi}
        if busi in ['ship2']:
            OBB_track_demo(opt)
        else:
            #if opt['business'] in ['river','highWay2','noParking','drowning','']:
            det_track_demo(opt, videopaths)
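# Usage sketch (assumption: the engine/weight files referenced in the par dicts exist for the chosen
# 'gpu' and 'business'): running `python Test.py` calls det_track_demo once per entry in businessAll;
# putting 'ship2' in businessAll routes that entry through OBB_track_demo instead.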