You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

518 lines
26KB

import cv2
import json
import math
import os
import time
from copy import deepcopy

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from scipy import interpolate

from models.experimental import attempt_load
from segutils.segmodel import SegModel, get_largest_contours
from segutils.trafficUtils import trafficPostProcessing, colour_code_segmentation, get_label_info, trafficPostProcessingV2, tracfficAccidentMixFunction
from segutils.trtUtils import segtrtEval, yolov5Trtforward, OcrTrtForward
from trackUtils.sort import moving_average_wang
from utils.datasets import letterbox
from utils.torch_utils import select_device
from utilsK.queRiver import get_labelnames, get_label_arrays, post_process_, img_pad, draw_painting_joint, detectDraw, getDetections, getDetectionsFromPreds
  17. def xywh2xyxy(box,iW=None,iH=None):
  18. xc,yc,w,h = box[0:4]
  19. x0 =max(0, xc-w/2.0)
  20. x1 =min(1, xc+w/2.0)
  21. y0=max(0, yc-h/2.0)
  22. y1=min(1,yc+h/2.0)
  23. if iW: x0,x1 = x0*iW,x1*iW
  24. if iH: y0,y1 = y0*iH,y1*iH
  25. return [x0,y0,x1,y1]
  26. def get_ms(t2,t1):
  27. return (t2-t1)*1000.0
  28. def get_postProcess_para(parfile):
  29. with open(parfile) as fp:
  30. par = json.load(fp)
  31. assert 'post_process' in par.keys(), ' parfile has not key word:post_process'
  32. parPost=par['post_process']
  33. return parPost["conf_thres"],parPost["iou_thres"],parPost["classes"],parPost["rainbows"]
  34. def get_postProcess_para_dic(parfile):
  35. with open(parfile) as fp:
  36. par = json.load(fp)
  37. parPost=par['post_process']
  38. return parPost
def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar={ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False }, font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,segPar={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True},mode='others',postPar=None):
    # Run detection (and optional segmentation) on one frame.
    #
    # Inputs:
    #   im0s  -- list of original images (only im0s[0] is processed)
    #   model -- detection model; segmodel -- segmentation model (None if unused)
    #   objectPar -- detection options: 'half', 'device', 'conf_thres', 'iou_thres',
    #                'allowedList', 'segRegionCnt', 'trtFlag_det', 'trtFlag_seg',
    #                optionally 'ovlap_thres_crossCategory' (cross-class NMS overlap)
    # Output: (p_result, time_info)
    #   p_result -- [im0s[0], im0, det_xywh, iframe] (seg_pred is appended when the
    #               mix post-process runs); im0 -- AI-processed image; iframe -- frame
    #               number (currently unused)
    #   det_xywh -- detection list; each element is one target:
    #               [xc, yc, w, h, float(conf_c), float(cls_c)]  # format since 2023.08.03
    #               cls_c -- class id (0,1,2,3,...); xc,yc,w,h -- center and size;
    #               conf_c -- score in [0,1]
    #   time_info -- timing statistics for each processing stage
    # NOTE(review): the mutable dict defaults are shared across calls, and the default
    # segPar has no 'mixFunction' key (required below, would raise KeyError) -- callers
    # appear to always pass explicit objectPar/segPar; confirm before relying on defaults.
    half,device,conf_thres,iou_thres,allowedList = objectPar['half'],objectPar['device'],objectPar['conf_thres'],objectPar['iou_thres'],objectPar['allowedList']
    trtFlag_det,trtFlag_seg,segRegionCnt = objectPar['trtFlag_det'],objectPar['trtFlag_seg'],objectPar['segRegionCnt']
    if 'ovlap_thres_crossCategory' in objectPar.keys():
        ovlap_thres = objectPar['ovlap_thres_crossCategory']
    else:
        ovlap_thres = None
    time0=time.time()
    # Letterbox / pad to the 640x640 network input
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
    # Stack into a batch
    img = np.stack(img, 0)
    # Convert: BGR to RGB, HWC to bs x 3 x H x W
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float() # uint8 to fp16/32
    img /= 255.0
    time01=time.time()
    # Optional segmentation pass (TensorRT engine or plain model)
    if segmodel:
        if trtFlag_seg:
            seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar)
        else:
            seg_pred,segstr = segmodel.eval(im0s[0] )
        segFlag=True
    else:
        seg_pred = None;segFlag=False;segstr='Not implemented'
    time1=time.time()
    # Detection forward pass (TensorRT engine or plain model)
    if trtFlag_det:
        pred = yolov5Trtforward(model,img)
    else:
        pred = model(img,augment=False)[0]
    time2=time.time()
    p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=ovlap_thres,padInfos=padInfos)
    # Fuse detection boxes with the segmentation result when a mix function is configured
    if segPar['mixFunction']['function']:
        mixFunction = segPar['mixFunction']['function'];H,W = im0s[0].shape[0:2]
        parMix = segPar['mixFunction']['pars'];
        parMix['imgSize'] = (W,H)
        p_result[2] , timeMixPost= mixFunction(p_result[2], seg_pred, pars=parMix )
        p_result.append(seg_pred)
    else:
        timeMixPost=':0 ms'
    time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s ,timeMixPost:%s '%( (time01-time0)*1000, (time1-time01)*1000 ,(time2-time1)*1000,timeOut , segstr.strip(),timeMixPost )
    return p_result,time_info
def AI_Seg_process(im0s,segmodel,digitWordFont,trtFlag_seg=True,segPar={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True},postPar= {'label_csv': './AIlib2/weights/conf/trafficAccident/class_dict.csv', 'speedRoadArea': 5100, 'vehicleArea': 100, 'speedRoadVehicleAngleMin': 15, 'speedRoadVehicleAngleMax': 75, 'vehicleLengthWidthThreshold': 4, 'vehicleSafeDistance': 7}):
    '''
    Run semantic segmentation plus traffic post-processing on one frame.

    Inputs
        im0s -- list of original images (only im0s[0] is processed)
        segmodel -- segmentation model (None if unused)
        digitWordFont -- drawing parameters: fonts, 'label_arraylist', 'rainbows'
        trtFlag_seg -- whether the model is a TensorRT engine
        segPar -- segmentation model parameters
        postPar -- post-processing parameters (label csv path, area/angle thresholds)
    Outputs
        seg_pred -- semantic segmentation result map (class ids 0,1,2,...)
        img_draw -- original image with detected rectangles drawn on it
        segstr -- timing info string
        list2 -- target boxes, one [cls, x0, y0, x1, y1, conf] per target
    '''
    time1=time.time()
    H,W=im0s[0].shape[0:2]
    img_draw=im0s[0].copy()
    if trtFlag_seg:
        seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar)
    else:
        seg_pred,segstr = segmodel.eval(im0s[0] )
    time2 = time.time()
    label_info = get_label_info(postPar['label_csv'])
    postPar['CCS']=colour_code_segmentation(seg_pred.copy(), label_info)
    postPar['sourceImageSize'] = im0s[0].shape[0:2]
    postPar['seg_pred_size'] = seg_pred.shape[0:2]
    list1,post_time_infos = trafficPostProcessing(postPar)
    list2=[]
    cls=0
    label_arraylist=digitWordFont['label_arraylist']
    rainbows=digitWordFont['rainbows']
    for bpoints in list1:
        # Axis-aligned bounding box of the polygon points
        bpoints=np.array(bpoints)
        x0=np.min( bpoints[:,0] )
        y0=np.min( bpoints[:,1] )
        x1=np.max( bpoints[:,0] )
        y1=np.max( bpoints[:,1] )
        # Pseudo-confidence derived from box position: the normalized box center is
        # averaged over x and y, then mapped so boxes near the image center score
        # close to 1 and boxes near the border close to 0.
        conf= ((x0+x1)/W + (y0+y1)/H)/4.0;
        conf=1.0 - math.fabs((conf-0.5)/0.5)
        xyxy=[x0,y0,x1,y1]
        xyxy=[int(x+0.5) for x in xyxy]
        list2.append( [ cls, x0,y0,x1,y1,conf ] )
        img_draw = draw_painting_joint(xyxy,img_draw,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=digitWordFont)
    segstr = 'segInfer:%.2f %s '%( (time2-time1)*1000.0,post_time_infos )
    return seg_pred,img_draw,segstr,list2
  158. def AI_process_v2(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device=' cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ):
  159. #输入参数
  160. # im0s---原始图像列表
  161. # model---检测模型,segmodel---分割模型(如若没有用到,则为None)
  162. #输出:两个元素(列表,字符)构成的元组,[im0s[0],im0,det_xywh,iframe],strout
  163. # [im0s[0],im0,det_xywh,iframe]中,
  164. # im0s[0]--原始图像,im0--AI处理后的图像,iframe--帧号/暂时不需用到。
  165. # det_xywh--检测结果,是一个列表。
  166. # 其中每一个元素表示一个目标构成如:[float(cls_c), xc,yc,w,h, float(conf_c)]
  167. # #cls_c--类别,如0,1,2,3; xc,yc,w,h--中心点坐标及宽;conf_c--得分, 取值范围在0-1之间
  168. # #strout---统计AI处理个环节的时间
  169. # Letterbox
  170. time0=time.time()
  171. #img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
  172. img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
  173. # Stack
  174. img = np.stack(img, 0)
  175. # Convert
  176. img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
  177. img = np.ascontiguousarray(img)
  178. img = torch.from_numpy(img).to(device)
  179. img = img.half() if half else img.float() # uint8 to fp16/32
  180. time01=time.time()
  181. img /= 255.0 # 0 - 255 to 0.0 - 1.0
  182. if segmodel:
  183. seg_pred,segstr = segmodel.eval(im0s[0] )
  184. segFlag=True
  185. else:
  186. seg_pred = None;segFlag=False
  187. time1=time.time()
  188. pred = model(img,augment=False)
  189. time2=time.time()
  190. datas = [[''], img, im0s, None,pred,seg_pred,10]
  191. p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList,segmodel=segFlag,font=font,padInfos=padInfos)
  192. time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s'%( (time01-time0)*1000, (time1-time01)*1000 ,(time2-time1)*1000,timeOut , segstr )
  193. return p_result,time_info
def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device=' cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,trtFlag_det=False,SecNms=None):
    # Run detection (and optional segmentation) on one frame, forest-scenario variant.
    #
    # Inputs:
    #   im0s -- list of original images (only im0s[0] is processed)
    #   model -- detection model; segmodel -- segmentation model (None if unused)
    #   trtFlag_det -- whether the detection model is a TensorRT engine
    #   SecNms -- optional second-stage (cross-class) NMS overlap threshold
    # Output: (p_result, time_info) where p_result = [im0s[0], im0, det_xywh, iframe]:
    #   det_xywh -- detection list; each element is one target:
    #               [xc, yc, w, h, float(conf_c), float(cls_c)]  # format since 2023.08.03
    #               cls_c -- class id (0,1,2,3,...); xc,yc,w,h -- center and size;
    #               conf_c -- score in [0,1]
    #   time_info -- timing statistics for each processing stage
    # NOTE(review): the default device=' cuda:0' contains a leading space -- looks
    # like a typo, kept as-is; callers pass an explicit device.
    time0=time.time()
    # Letterbox / pad to the 640x640 network input
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
    # Stack into a batch
    img = np.stack(img, 0)
    # Convert: BGR to RGB, HWC to bs x 3 x H x W
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float() # uint8 to fp16/32
    img /= 255.0 # 0 - 255 to 0.0 - 1.0
    if segmodel:
        seg_pred,segstr = segmodel.eval(im0s[0] )
        segFlag=True
    else:
        seg_pred = None;segFlag=False
    time1=time.time()
    pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]
    time2=time.time()
    datas = [[''], img, im0s, None,pred,seg_pred,10]
    ObjectPar={ 'object_config':allowedList, 'slopeIndex':[] ,'segmodel':segFlag,'segRegionCnt':0 }
    p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos,ovlap_thres=SecNms)
    time_info = 'letterbox:%.1f, infer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )
    return p_result,time_info+timeOut
def AI_det_track( im0s_in,modelPar,processPar,sort_tracker,segPar=None):
    # Detect (and optionally segment) one frame, then update the SORT tracker.
    #
    # Inputs:
    #   im0s_in -- (image_list, frame_number); only image_list[0] is processed
    #   modelPar -- {'det_Model': ..., 'seg_Model': ...}
    #   processPar -- 'half', 'device', 'conf_thres', 'iou_thres', 'trtFlag_det', 'iou2nd'
    #   sort_tracker -- SORT tracker instance, or None/falsy to skip tracking
    #   segPar -- segmentation params with 'trtFlag_seg' and 'mixFunction'; only read
    #             when modelPar['seg_Model'] is truthy
    # Returns (p_result, timing string). p_result[2] holds boxes as
    # [x1, y1, x2, y2, conf, detclass]; when tracking runs, p_result[4] is the
    # tracked detection array and p_result[5] the live tracker objects.
    im0s,iframe=im0s_in[0],im0s_in[1]
    model = modelPar['det_Model']
    segmodel = modelPar['seg_Model']
    half,device,conf_thres, iou_thres,trtFlag_det = processPar['half'], processPar['device'], processPar['conf_thres'], processPar['iou_thres'],processPar['trtFlag_det']
    iou2nd = processPar['iou2nd']
    time0=time.time()
    # Letterbox / pad to the 640x640 network input
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
    img = np.stack(img, 0)
    # Convert: BGR to RGB, HWC to bs x 3 x H x W
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float() # uint8 to fp16/32
    img /= 255.0 # 0 - 255 to 0.0 - 1.0
    seg_pred = None;segFlag=False
    time1=time.time()
    pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]
    time2=time.time()
    p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=iou2nd,padInfos=padInfos)
    if segmodel:
        timeS1=time.time()
        seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar) if segPar['trtFlag_seg'] else segmodel.eval(im0s[0] )
        timeS2=time.time()
        # Fuse detection boxes with the segmentation result
        mixFunction = segPar['mixFunction']['function']
        p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=segPar['mixFunction']['pars'] )
        timeInfos_seg_post = 'segInfer:%.1f ,postProcess:%s'%( (timeS2-timeS1)*1000, timeInfos_post )
    else:
        timeInfos_seg_post = ' '
    time_info = 'letterbox:%.1f, detinfer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )
    if sort_tracker:
        # The tracker invocation frequency could be made configurable here.
        # Pass an empty array to SORT when there are no detections;
        # the detected object class is sent along with each box.
        dets_to_sort = np.empty((0,7), dtype=np.float32)
        for x1,y1,x2,y2,conf, detclass in p_result[2]:
            dets_to_sort = np.vstack((dets_to_sort,
                        np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) ))
        # Run SORT
        tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
        tracks =sort_tracker.getTrackers()
        p_result.append(tracked_dets) ###index=4
        p_result.append(tracks) ###index=5
    return p_result,time_info+timeOut+timeInfos_seg_post
def AI_det_track_batch(imgarray_list, iframe_list ,modelPar,processPar,sort_tracker,trackPar,segPar=None):
    '''
    Detect + track a batch of frames, then interpolate and smooth the tracks.

    Inputs:
        imgarray_list -- list of images
        iframe_list -- list of frame numbers
        modelPar -- model dict: {'det_Model': ..., 'seg_Model': ...}
        processPar -- detection params: 'half', 'device', 'conf_thres', 'iou_thres', 'trtFlag_det'
        sort_tracker -- initialized tracker object; required even for a single
                        frame, to keep the interface uniform
        trackPar -- tracking params, keys: det_cnt (detect every N-th frame), windowsize
        segPar -- segmentation params, or None if unused
    Outputs: retResults, timeInfos
        retResults[0] -- imgarray_list
        retResults[1] -- all results as one numpy array with 8 columns:
                         x1, y1, x2, y2, conf, detclass, iframe, trackId
        retResults[2] -- all results as lists: retResults[2][j][k] is box k of frame j,
                         each box being [x0, y0, x1, y1, conf, cls, iframe, trackId]
                         (format changed 2023.08.03)
    '''
    det_cnt,windowsize = trackPar['det_cnt'] ,trackPar['windowsize']
    trackers_dic={}
    # Detect only every det_cnt-th frame; always include the last frame
    index_list = list(range( 0, len(iframe_list) ,det_cnt ));
    if len(index_list)>1 and index_list[-1]!= iframe_list[-1]:
        index_list.append( len(iframe_list) - 1 )
    if len(imgarray_list)==1: # single image: no tracking needed
        retResults = []
        p_result,timeOut = AI_det_track( [ [imgarray_list[0]] ,iframe_list[0] ],modelPar,processPar,None,segPar )
        # The 4 lines below only pad the detections to the common 8-column format
        detArray = np.array(p_result[2])
        if len(p_result[2])==0:res=[]
        else:
            cnt = detArray.shape[0];trackIds=np.zeros((cnt,1));iframes = np.zeros((cnt,1)) + iframe_list[0]
            detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6] ,iframes, trackIds ) ) ## input format changed 2023.08.03
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ]
        retResults=[imgarray_list,detArray,res ]
        return retResults,timeOut
    else:
        t0 = time.time()
        timeInfos_track=''
        for iframe_index, index_frame in enumerate(index_list):
            p_result,timeOut = AI_det_track( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelPar,processPar,sort_tracker,segPar )
            timeInfos_track='%s:%s'%(timeInfos_track,timeOut)
            # Keep the latest snapshot of every tracker seen in this batch
            for tracker in p_result[5]:
                trackers_dic[tracker.id]=deepcopy(tracker)
        t1 = time.time()
        track_det_result = np.empty((0,8))
        for trackId in trackers_dic.keys():
            tracker = trackers_dic[trackId]
            bbox_history = np.array(tracker.bbox_history)
            if len(bbox_history)<2: continue
            # Convert (x0,y0,x1,y1) to (xc,yc,w,h) for interpolation
            xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
            whs = bbox_history[:,2:4] - bbox_history[:,0:2]
            bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs;
            arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6]
            # frames[0]/frames[-1] are the first/last frame this target was seen in;
            # a target may span several batches. To recover the full trajectory, the
            # interpolation interval is taken from the target's own first appearance
            # (rather than intersecting with the batch's frame range).
            inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1])
            new_frames= np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 )
            # Linear interpolation to every frame, then moving-average smoothing of
            # the 4 box coordinates (window shrunk to an odd size for short tracks)
            f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
            move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
            for im in range(4):
                interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im],move_cnt_use )
            cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
            interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
            track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
        # Convert [xc,yc,w,h] back to [x0,y0,x1,y1]
        x0s = track_det_result[:,0] - track_det_result[:,2]/2 ; x1s = track_det_result[:,0] + track_det_result[:,2]/2
        y0s = track_det_result[:,1] - track_det_result[:,3]/2 ; y1s = track_det_result[:,1] + track_det_result[:,3]/2
        track_det_result[:,0] = x0s; track_det_result[:,1] = y0s;
        track_det_result[:,2] = x1s; track_det_result[:,3] = y1s;
        # Group the smoothed boxes back per frame
        detResults=[]
        for iiframe in iframe_list:
            boxes_oneFrame = track_det_result[ track_det_result[:,6]==iiframe ]
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in boxes_oneFrame ]
            detResults.append( res )
        retResults=[imgarray_list,track_det_result,detResults ]
        t2 = time.time()
        timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%(get_ms(t1,t0),get_ms(t2,t1), timeInfos_track )
        return retResults,timeInfos
  375. def ocr_process(pars):
  376. img_patch,engine,context,converter,AlignCollate_normal,device=pars[0:6]
  377. time1 = time.time()
  378. img_tensor = AlignCollate_normal([ Image.fromarray(img_patch,'L') ])
  379. img_input = img_tensor.to('cuda:0')
  380. time2 = time.time()
  381. preds,trtstr=OcrTrtForward(engine,[img_input],context)
  382. time3 = time.time()
  383. batch_size = preds.size(0)
  384. preds_size = torch.IntTensor([preds.size(1)] * batch_size)
  385. ######## filter ignore_char, rebalance
  386. preds_prob = F.softmax(preds, dim=2)
  387. preds_prob = preds_prob.cpu().detach().numpy()
  388. pred_norm = preds_prob.sum(axis=2)
  389. preds_prob = preds_prob/np.expand_dims(pred_norm, axis=-1)
  390. preds_prob = torch.from_numpy(preds_prob).float().to(device)
  391. _, preds_index = preds_prob.max(2)
  392. preds_index = preds_index.view(-1)
  393. time4 = time.time()
  394. preds_str = converter.decode_greedy(preds_index.data.cpu().detach().numpy(), preds_size.data)
  395. time5 = time.time()
  396. info_str= ('pre-process:%.2f TRTforward:%.2f (%s) postProcess:%2.f decoder:%.2f, Total:%.2f , pred:%s'%(get_ms(time2,time1 ),get_ms(time3,time2 ),trtstr, get_ms(time4,time3 ), get_ms(time5,time4 ), get_ms(time5,time1 ), preds_str ) )
  397. return preds_str,info_str
def main():
    # Demo entry point: load detection + segmentation models and run AI_process
    # over every image in images/examples/, writing results to images/results/.
    ## Preset parameters
    device_='1' ## device selection: 'cpu', '0' or '1'
    ## The parameters below are fixed for now
    Detweights = "weights/yolov5/class5/best_5classes.pt"
    seg_nclass = 2
    Segweights = "weights/BiSeNet/checkpoint.pth"
    conf_thres,iou_thres,classes= 0.25,0.45,5
    labelnames = "weights/yolov5/class5/labelnames.json"
    rainbows = [ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
    allowedList=[0,1,2,3]
    ## Load the models and prepare the label drawings
    device = select_device(device_)
    names=get_labelnames(labelnames)
    label_arraylist = get_label_arrays(names,rainbows,outfontsize=40,fontpath="conf/platech.ttf")
    half = device.type != 'cpu' # half precision only supported on CUDA
    model = attempt_load(Detweights, map_location=device) # load FP32 model
    if half: model.half()
    segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)
    ## Image test
    impth = 'images/examples/'
    outpth = 'images/results/'
    folders = os.listdir(impth)
    for i in range(len(folders)):
        imgpath = os.path.join(impth, folders[i])
        im0s=[cv2.imread(imgpath)]
        time00 = time.time()
        # NOTE(review): AI_process's current signature takes objectPar/font/segPar
        # dicts and has no fontSize parameter -- this positional call looks stale
        # and would raise TypeError; confirm against the intended AI_process version.
        p_result,timeOut = AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,half,device,conf_thres, iou_thres,allowedList,fontSize=1.0)
        time11 = time.time()
        image_array = p_result[1]
        cv2.imwrite( os.path.join( outpth,folders[i] ) ,image_array )
if __name__=="__main__":
    main()