import cv2,os,time,json
from models.experimental import attempt_load
from segutils.segmodel import SegModel,get_largest_contours
from segutils.trtUtils import segtrtEval,yolov5Trtforward,OcrTrtForward
from segutils.trafficUtils import tracfficAccidentMixFunction
from utils.torch_utils import select_device
from utilsK.queRiver import get_labelnames,get_label_arrays,post_process_,img_pad,draw_painting_joint,detectDraw,getDetections,getDetectionsFromPreds
from trackUtils.sort import moving_average_wang
from utils.datasets import letterbox
import numpy as np
import torch
import math
from PIL import Image
import torch.nn.functional as F
from copy import deepcopy
from scipy import interpolate
import glob
def get_images_videos(impth, imageFixs=['.jpg','.JPG','.PNG','.png'], videoFixs=['.MP4','.mp4','.avi']):
    imgpaths=[]    ### collect all image files in the directory
    videopaths=[]  ### collect all video files in the directory
    if os.path.isdir(impth):
        for postfix in imageFixs:
            imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix)))
        for postfix in videoFixs:
            videopaths.extend(glob.glob('%s/*%s'%(impth,postfix)))
    else:
        postfix = os.path.splitext(impth)[-1]
        if postfix in imageFixs: imgpaths=[impth]
        if postfix in videoFixs: videopaths=[impth]
    print('%s: test Images:%d , test videos:%d '%(impth, len(imgpaths), len(videopaths)))
    return imgpaths,videopaths
def xywh2xyxy(box,iW=None,iH=None):
    xc,yc,w,h = box[0:4]
    x0 = max(0, xc-w/2.0)
    x1 = min(1, xc+w/2.0)
    y0 = max(0, yc-h/2.0)
    y1 = min(1, yc+h/2.0)
    if iW: x0,x1 = x0*iW,x1*iW
    if iH: y0,y1 = y0*iH,y1*iH
    return [x0,y0,x1,y1]
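# A minimal sketch (not part of the original file) showing what xywh2xyxy returns:
# a normalized center-format box scaled to pixel coordinates on an assumed
# 1920x1080 image.  The helper below is illustrative only and is never called.
def _example_xywh2xyxy():
    box = [0.5, 0.5, 0.2, 0.4]                 # xc, yc, w, h in normalized coordinates
    print(xywh2xyxy(box, iW=1920, iH=1080))    # -> [768.0, 324.0, 1152.0, 756.0]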
def get_ms(t2,t1):
    return (t2-t1)*1000.0
def get_postProcess_para(parfile):
    with open(parfile) as fp:
        par = json.load(fp)
    assert 'post_process' in par.keys(), ' parfile has no key word: post_process'
    parPost=par['post_process']
    return parPost["conf_thres"],parPost["iou_thres"],parPost["classes"],parPost["rainbows"]
def get_postProcess_para_dic(parfile):
    with open(parfile) as fp:
        par = json.load(fp)
    parPost=par['post_process']
    return parPost
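# Illustrative sketch (not part of the original file) of the parameter file layout
# assumed by get_postProcess_para / get_postProcess_para_dic: a JSON object with a
# "post_process" section.  Key names follow the code above; the values are examples only.
#   {
#     "post_process": {
#       "conf_thres": 0.25,
#       "iou_thres": 0.45,
#       "classes": 5,
#       "rainbows": [[0, 0, 255], [0, 255, 0]]
#     }
#   }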
def score_filter_byClass(pdetections,score_para_2nd):
    ret=[]
    for det in pdetections:
        score,cls = det[4],det[5]
        if int(cls) in score_para_2nd.keys():
            score_th = score_para_2nd[int(cls)]
        elif str(int(cls)) in score_para_2nd.keys():
            score_th = score_para_2nd[str(int(cls))]
        else:
            score_th = 0.7
        if score > score_th:
            ret.append(det)
    return ret
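# A small sketch (not part of the original file) of score_filter_byClass: class 0
# uses its configured threshold 0.5, while class 7 falls back to the default 0.7
# and is filtered out.  The helper is illustrative and never called.
def _example_score_filter_byClass():
    dets = [[10, 10, 50, 50, 0.60, 0], [20, 20, 60, 60, 0.65, 7]]
    print(score_filter_byClass(dets, {0: 0.5}))   # -> [[10, 10, 50, 50, 0.6, 0]]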
def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,
               objectPar={'half':True,'device':'cuda:0','conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1,'trtFlag_det':False,'trtFlag_seg':False,'score_byClass':{x:0.1 for x in range(30)}},
               font={'line_thickness':None,'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},
               segPar={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std':(0.229, 0.224, 0.225),'numpy':False,'RGB_convert_first':True},
               mode='others',postPar=None):
    # Inputs:
    #   im0s---list of original images
    #   model---detection model; segmodel---segmentation model (None if not used)
    #
    # Output: a tuple of two elements (list, string): [im0s[0],im0,det_xywh,iframe], strout
    #   in [im0s[0],im0,det_xywh,iframe]:
    #     im0s[0]--original image, im0--image after AI processing, iframe--frame index (not needed for now).
    #     det_xywh--detection results, a list.
    #       Each element describes one target as [ xc,yc,w,h, float(conf_c),float(cls_c) ]  (output format changed 2023.08.03)
    #       cls_c--class id, e.g. 0,1,2,3; xc,yc,w,h--center coordinates, width and height; conf_c--score in [0,1]
    #   strout---timing statistics for each stage of the AI processing
    # Letterbox
    half,device,conf_thres,iou_thres,allowedList = objectPar['half'],objectPar['device'],objectPar['conf_thres'],objectPar['iou_thres'],objectPar['allowedList']
    trtFlag_det,trtFlag_seg,segRegionCnt = objectPar['trtFlag_det'],objectPar['trtFlag_seg'],objectPar['segRegionCnt']
    if 'ovlap_thres_crossCategory' in objectPar.keys(): ovlap_thres = objectPar['ovlap_thres_crossCategory']
    else: ovlap_thres = None
    if 'score_byClass' in objectPar.keys(): score_byClass = objectPar['score_byClass']
    else: score_byClass = None
    time0=time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640,640,3)); img = [img]
    else:
        #print('####line72:',im0s[0][10:12,10:12,2])
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]; padInfos=None
        #print('####line74:',img[0][10:12,10:12,2])
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0
    time01=time.time()
    if segmodel:
        seg_pred,segstr = segmodel.eval(im0s[0])
        segFlag=True
    else:
        seg_pred = None; segFlag=False; segstr='Not implemented'
    time1=time.time()
    if trtFlag_det:
        pred = yolov5Trtforward(model,img)
    else:
        #print('####line96:',img[0,0,10:12,10:12])
        pred = model(img,augment=False)[0]
    time2=time.time()
    p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=ovlap_thres,padInfos=padInfos)
    if score_byClass:
        p_result[2] = score_filter_byClass(p_result[2],score_byClass)
    print('-'*10,p_result[2])
    #if mode=='highWay3.0':
    #if segmodel:
    if segPar and segPar['mixFunction']['function']:
        mixFunction = segPar['mixFunction']['function']; H,W = im0s[0].shape[0:2]
        parMix = segPar['mixFunction']['pars']   #print('###line117:',parMix,p_result[2])
        parMix['imgSize'] = (W,H)
        #print(' -----------line110: ',p_result[2] ,'\n', seg_pred)
        p_result[2], timeMixPost = mixFunction(p_result[2], seg_pred, pars=parMix)
        #print(' -----------line112: ',p_result[2] )
        p_result.append(seg_pred)
    else:
        timeMixPost=':0 ms'
    #print('#### line121: segstr:%s timeMixPost:%s timeOut:%s'%( segstr.strip(), timeMixPost,timeOut ))
    time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s ,timeMixPost:%s '%((time01-time0)*1000, (time1-time01)*1000, (time2-time1)*1000, timeOut, segstr.strip(), timeMixPost)
    #if mode=='highWay3.0':
    return p_result,time_info
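# Hedged usage sketch (not part of the original file): a typical single-image call to
# AI_process, assuming `model`, `segmodel`, `names`, `label_arraylist` and `rainbows`
# are loaded as in main() below and `frame` is a BGR image from cv2.imread.
# Parameter values are illustrative; passing segPar=None skips the segmentation mix step.
#   objectPar = {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
#                'allowedList': [0, 1, 2, 3], 'segRegionCnt': 1,
#                'trtFlag_det': False, 'trtFlag_seg': False}
#   p_result, time_info = AI_process([frame], model, segmodel, names, label_arraylist,
#                                    rainbows, objectPar=objectPar, segPar=None)
#   det_xywh = p_result[2]   # [[xc, yc, w, h, conf, cls], ...]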
def default_mix(predlist,par):
    return predlist[0],''
def AI_process_N(im0s,modelList,postProcess):
    # Inputs:
    ##  im0s---list of original images
    ##  modelList--all models
    #   postProcess--dict containing the post-processing function and its parameters
    # Outputs:
    ##  ret[0]--detection results
    ##  ret[1]--timing information
    # Each entry of modelList is a model object whose eval() returns that model's inference result
    modelRets=[model.eval(im0s[0]) for model in modelList]
    timeInfos = [x[1] for x in modelRets]
    timeInfos=''.join(timeInfos)
    # postProcess['function']--post-processing function; its input is the list of all model outputs
    mixFunction = postProcess['function']
    predsList = [modelRet[0] for modelRet in modelRets]
    H,W = im0s[0].shape[0:2]
    postProcess['pars']['imgSize'] = (W,H)
    # ret is the merged (post-processed) result
    ret = mixFunction(predsList, postProcess['pars'])
    return ret[0],timeInfos+ret[1]
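# Hedged usage sketch (not part of the original file): AI_process_N chains any number
# of model objects whose eval(img) returns (result, timeInfo) and merges their outputs
# with postProcess['function'].  `detModel` and `segModel` are assumed, already-loaded
# model objects; default_mix (defined above) simply returns the first model's result.
#   postProcess = {'function': default_mix, 'pars': {}}
#   results, timeInfos = AI_process_N([frame], [detModel, segModel], postProcess)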
def AI_process_C(im0s,modelList,postProcess):
    # Inputs:
    ##  im0s---list of original images
    ##  modelList--all models
    #   postProcess--dict containing the post-processing function and its parameters
    # Outputs:
    ##  ret[0]--detection results
    ##  ret[1]--timing information
    # Each entry of modelList is a model object whose eval() returns that model's inference result
    t0=time.time()
    detRets0 = modelList[0].eval(im0s[0])
    detRets0 = detRets0[0]
    #detRets0=[[12, 46, 1127, 1544, 0.2340087890625, 2.0], [1884, 1248, 2992, 1485, 0.64208984375, 1.0]]
    detRets0 = list(filter(lambda x: x[5]<=2, detRets0))
    t1=time.time()
    imagePatches = [im0s[0][int(x[1]):int(x[3]), int(x[0]):int(x[2])] for x in detRets0]
    detRets1 = [modelList[1].eval(patch) for patch in imagePatches]
    detRets1 = [x[0]*255 for x in detRets1]
    t2=time.time()
    mixFunction = postProcess['function']
    crackInfos = [mixFunction(patchMask) for patchMask in detRets1]
    rets = [detRets0[i] + crackInfos[i] for i in range(len(imagePatches))]
    t3=time.time()
    outInfos='total:%.1f (det:%.1f, %d seg calls:%.1f, mixProcess:%.1f) '%((t3-t0)*1000, (t1-t0)*1000, len(detRets1), (t2-t1)*1000, (t3-t2)*1000)
    return rets,outInfos
def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device='cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3],
                      font={'line_thickness':None,'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},
                      trtFlag_det=False,SecNms=None):
    # Inputs:
    #   im0s---list of original images
    #   model---detection model; segmodel---segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0],im0,det_xywh,iframe], strout
    #   in [im0s[0],im0,det_xywh,iframe]:
    #     im0s[0]--original image, im0--image after AI processing, iframe--frame index (not needed for now).
    #     det_xywh--detection results, a list.
    #       Each element describes one target as [ xc,yc,w,h, float(conf_c),float(cls_c) ]  (output format changed 2023.08.03)
    #       cls_c--class id, e.g. 0,1,2,3; xc,yc,w,h--center coordinates, width and height; conf_c--score in [0,1]
    #   strout---timing statistics for each stage of the AI processing
    # Letterbox
    time0=time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640,640,3)); img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]; padInfos=None
    #img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if segmodel:
        seg_pred,segstr = segmodel.eval(im0s[0])
        segFlag=True
    else:
        seg_pred = None; segFlag=False
    time1=time.time()
    pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]
    time2=time.time()
    datas = [[''], img, im0s, None, pred, seg_pred, 10]
    ObjectPar={'object_config':allowedList, 'slopeIndex':[], 'segmodel':segFlag, 'segRegionCnt':0}
    p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos,ovlap_thres=SecNms)
    #print('###line274:',p_result[2])
    #p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList,segmodel=segFlag,font=font,padInfos=padInfos)
    time_info = 'letterbox:%.1f, infer:%.1f, '%((time1-time0)*1000,(time2-time1)*1000)
    return p_result,time_info+timeOut
def AI_det_track(im0s_in,modelPar,processPar,sort_tracker,segPar=None):
    im0s,iframe=im0s_in[0],im0s_in[1]
    model = modelPar['det_Model']
    segmodel = modelPar['seg_Model']
    half,device,conf_thres, iou_thres,trtFlag_det = processPar['half'], processPar['device'], processPar['conf_thres'], processPar['iou_thres'],processPar['trtFlag_det']
    if 'score_byClass' in processPar.keys(): score_byClass = processPar['score_byClass']
    else: score_byClass = None
    iou2nd = processPar['iou2nd']
    time0=time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640,640,3)); img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]; padInfos=None
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    seg_pred = None; segFlag=False
    time1=time.time()
    pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]
    time2=time.time()
    #p_result,timeOut = getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos)
    p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=iou2nd,padInfos=padInfos)
    if score_byClass:
        p_result[2] = score_filter_byClass(p_result[2],score_byClass)
    if segmodel:
        seg_pred,segstr = segmodel.eval(im0s[0])
        segFlag=True
    else:
        seg_pred = None; segFlag=False; segstr='No segmodel'
    if segPar and segPar['mixFunction']['function']:
        mixFunction = segPar['mixFunction']['function']
        H,W = im0s[0].shape[0:2]
        parMix = segPar['mixFunction']['pars']   #print('###line117:',parMix,p_result[2])
        parMix['imgSize'] = (W,H)
        p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=parMix)
        timeInfos_seg_post = 'segInfer:%s ,postMixProcess:%s'%(segstr, timeInfos_post)
    else:
        timeInfos_seg_post = ' '
    '''
    if segmodel:
        timeS1=time.time()
        #seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar) if segPar['trtFlag_seg'] else segmodel.eval(im0s[0] )
        seg_pred,segstr = segmodel.eval(im0s[0] )
        timeS2=time.time()
        mixFunction = segPar['mixFunction']['function']
        p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=segPar['mixFunction']['pars'] )
        timeInfos_seg_post = 'segInfer:%.1f ,postProcess:%s'%( (timeS2-timeS1)*1000, timeInfos_post )
    else:
        timeInfos_seg_post = ' '
    #print('######line341:',seg_pred.shape,np.max(seg_pred),np.min(seg_pred) , len(p_result[2]) )
    '''
    time_info = 'letterbox:%.1f, detinfer:%.1f, '%((time1-time0)*1000,(time2-time1)*1000)
    if sort_tracker:
        # The frequency at which the tracker is called can be configured here
        #..................USE TRACK FUNCTION....................
        # pass an empty array to sort
        dets_to_sort = np.empty((0,7), dtype=np.float32)
        # NOTE: We send in detected object class too
        #for detclass,x1,y1,x2,y2,conf in p_result[2]:
        for x1,y1,x2,y2,conf, detclass in p_result[2]:
            #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
            dets_to_sort = np.vstack((dets_to_sort,
                                      np.array([x1, y1, x2, y2, conf, detclass, iframe], dtype=np.float32)))
        # Run SORT
        tracked_dets = deepcopy(sort_tracker.update(dets_to_sort))
        tracks = sort_tracker.getTrackers()
        p_result.append(tracked_dets)  ###index=4
        p_result.append(tracks)        ###index=5
    return p_result,time_info+timeOut+timeInfos_seg_post
def AI_det_track_batch(imgarray_list, iframe_list, modelPar, processPar, sort_tracker, trackPar, segPar=None):
    '''
    Inputs:
        imgarray_list--list of images
        iframe_list -- list of frame indices
        modelPar--model parameters, a dict: modelPar={'det_Model':..., 'seg_Model':...}
        processPar--dict of detection parameters: 'half', 'device', 'conf_thres', 'iou_thres', 'trtFlag_det'
        sort_tracker--an initialized tracker object; required even for a single frame, to keep the interface consistent
        trackPar--tracking parameters; keys include: det_cnt, windowsize
        segPar--segmentation-model parameters, or None if not used
    Outputs: retResults, timeInfos
        retResults: list
        retResults[0]--imgarray_list
        retResults[1]--all results as a numpy array with 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
        retResults[2]--all results as a list; each element holds one frame's detections, each box formatted as
                       [x0, y0, x1, y1, conf, cls, iframe, trackId]; e.g. retResults[2][j][k] is the k-th box of frame j.
                       (output format changed 2023.08.03)
    '''
    det_cnt,windowsize = trackPar['det_cnt'], trackPar['windowsize']
    trackers_dic={}
    index_list = list(range(0, len(iframe_list), det_cnt))
    if len(index_list)>1 and index_list[-1]!= iframe_list[-1]:
        index_list.append(len(iframe_list) - 1)
    if len(imgarray_list)==1:  # a single image needs no tracking
        retResults = []
        p_result,timeOut = AI_det_track([[imgarray_list[0]], iframe_list[0]], modelPar, processPar, None, segPar)
        ## the next few lines only keep the output format consistent with the multi-frame case
        detArray = np.array(p_result[2])
        #print('##line371:',detArray)
        if len(p_result[2])==0: res=[]
        else:
            cnt = detArray.shape[0]; trackIds=np.zeros((cnt,1)); iframes = np.zeros((cnt,1)) + iframe_list[0]
            #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
            detArray = np.hstack((detArray[:,0:4], detArray[:,4:6], iframes, trackIds))  ## 2023.08.03: input format changed
            res = [[b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]] for b in detArray]
        retResults=[imgarray_list,detArray,res]
        #print('##line380:',retResults[2])
        return retResults,timeOut
    else:
        t0 = time.time()
        timeInfos_track=''
        for iframe_index, index_frame in enumerate(index_list):
            p_result,timeOut = AI_det_track([[imgarray_list[index_frame]], iframe_list[index_frame]], modelPar, processPar, sort_tracker, segPar)
            timeInfos_track='%s:%s'%(timeInfos_track,timeOut)
            for tracker in p_result[5]:
                trackers_dic[tracker.id]=deepcopy(tracker)
        t1 = time.time()
        track_det_result = np.empty((0,8))
        for trackId in trackers_dic.keys():
            tracker = trackers_dic[trackId]
            bbox_history = np.array(tracker.bbox_history)
            if len(bbox_history)<2: continue
            ### convert (x0,y0,x1,y1) to (xc,yc,w,h)
            xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4])/2
            whs = bbox_history[:,2:4] - bbox_history[:,0:2]
            bbox_history[:,0:2] = xcs_ycs; bbox_history[:,2:4] = whs
            arrays_box = bbox_history[:,0:7].transpose(); frames=bbox_history[:,6]
            # frame_min--first frame of this batch of images; e.g. for batch [1,100] frame_min=1, for [101,200] frame_min=101
            # frames[0]--first frame in which this target appears, e.g. [1,11,21,31,41] gives frames[0]=1;
            #            frames[0] may lie before frame_min, i.e. one target can span several batches.
            ## to keep the interpolation range as small as possible, take the intersection of [frame_min,frame_max] and [frames[0],frames[-1]]
            #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##
            ## to obtain the complete trajectory, interpolate from the frame where the target first appears
            inter_frame_min=int(frames[0]); inter_frame_max=int(frames[-1])
            new_frames = np.linspace(inter_frame_min, inter_frame_max, inter_frame_max-inter_frame_min+1)
            f_linear = interpolate.interp1d(frames, arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
            move_cnt_use = (len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
            for im in range(4):
                interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im], move_cnt_use)
            cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
            interpolation_x0s = np.hstack((interpolation_x0s, trackIds))
            track_det_result = np.vstack((track_det_result, interpolation_x0s))
            #print('#####line116:',trackId,frame_min,frame_max,'----------',interpolation_x0s.shape,track_det_result.shape ,'-----')
        ## convert [xc,yc,w,h] back to [x0,y0,x1,y1]
        x0s = track_det_result[:,0] - track_det_result[:,2]/2; x1s = track_det_result[:,0] + track_det_result[:,2]/2
        y0s = track_det_result[:,1] - track_det_result[:,3]/2; y1s = track_det_result[:,1] + track_det_result[:,3]/2
        track_det_result[:,0] = x0s; track_det_result[:,1] = y0s
        track_det_result[:,2] = x1s; track_det_result[:,3] = y1s
        detResults=[]
        for iiframe in iframe_list:
            boxes_oneFrame = track_det_result[track_det_result[:,6]==iiframe]
            res = [[b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]] for b in boxes_oneFrame]
            #[ x0 ,y0 ,x1 ,y1 ,conf,cls,iframe,trackId ]
            #[iframe, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
            detResults.append(res)
        retResults=[imgarray_list,track_det_result,detResults]
        t2 = time.time()
        timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%(get_ms(t1,t0), get_ms(t2,t1), timeInfos_track)
        return retResults,timeInfos
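# Hedged usage sketch (not part of the original file): batch detection plus SORT
# tracking over a short clip.  `detModel` is an assumed, already-loaded detector for
# modelPar['det_Model'], and `sort_tracker` an initialized tracker from trackUtils.sort;
# the parameter values are illustrative only.
#   frames = [cv2.imread(p) for p in sorted(glob.glob('clip/*.jpg'))]
#   modelPar = {'det_Model': detModel, 'seg_Model': None}
#   processPar = {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25,
#                 'iou_thres': 0.45, 'trtFlag_det': False, 'iou2nd': 0.66}
#   trackPar = {'det_cnt': 10, 'windowsize': 29}
#   retResults, timeInfos = AI_det_track_batch(frames, list(range(len(frames))),
#                                              modelPar, processPar, sort_tracker, trackPar)
#   # retResults[2][j][k] -> k-th box of frame j: [x0, y0, x1, y1, conf, cls, iframe, trackId]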
def AI_det_track_N(im0s_in,modelList,postProcess,sort_tracker):
    im0s,iframe=im0s_in[0],im0s_in[1]
    dets = AI_process_N(im0s,modelList,postProcess)
    p_result=[[],[],dets[0],[]]
    if sort_tracker:
        # The frequency at which the tracker is called can be configured here
        #..................USE TRACK FUNCTION....................
        # pass an empty array to sort
        dets_to_sort = np.empty((0,7), dtype=np.float32)
        # NOTE: We send in detected object class too
        #for detclass,x1,y1,x2,y2,conf in p_result[2]:
        for x1,y1,x2,y2,conf, detclass in p_result[2]:
            #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
            dets_to_sort = np.vstack((dets_to_sort,
                                      np.array([x1, y1, x2, y2, conf, detclass, iframe], dtype=np.float32)))
        # Run SORT
        tracked_dets = deepcopy(sort_tracker.update(dets_to_sort))
        tracks = sort_tracker.getTrackers()
        p_result.append(tracked_dets)  ###index=4
        p_result.append(tracks)        ###index=5
    return p_result,dets[1]
def get_tracker_cls(boxes,scId=4,clsId=5):
    # Normally a track chain holds a single class, but detection errors can put several classes on one chain.
    # The chain's class is therefore chosen as the class whose boxes have the largest summed confidence.
    # Input: boxes--the bbox_history kept during tracking, [[xc,yc,width,height,score,class,iframe],[...],[...]]
    ##  scId=4, column index of the score; clsId=5, column index of the class
    # Output: the class assigned to this track chain
    ids = list(set(boxes[:,clsId].tolist()))
    scores = [np.sum(boxes[:,scId][boxes[:,clsId]==x]) for x in ids]
    maxScoreId = scores.index(np.max(scores))
    return int(ids[maxScoreId])
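# A small sketch (not part of the original file) of get_tracker_cls: two boxes of
# class 1 sum to 1.3 confidence versus 0.9 for the single class-2 box, so the whole
# track chain is labeled class 1.  The helper is illustrative and never called.
def _example_get_tracker_cls():
    boxes = np.array([[10, 10, 5, 5, 0.6, 1, 0],
                      [11, 11, 5, 5, 0.7, 1, 1],
                      [12, 12, 5, 5, 0.9, 2, 2]], dtype=np.float32)
    print(get_tracker_cls(boxes, scId=4, clsId=5))   # -> 1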
def AI_det_track_batch_N(imgarray_list, iframe_list, modelList, postProcess, sort_tracker, trackPar):
    '''
    Inputs:
        imgarray_list--list of images
        iframe_list -- list of frame indices
        modelList--list of models; each model object exposes eval()
        postProcess--dict containing the post-processing function and its parameters
        sort_tracker--an initialized tracker object; required even for a single frame, to keep the interface consistent
        trackPar--tracking parameters; keys include: det_cnt, windowsize
    Outputs: retResults, timeInfos
        retResults: list
        retResults[0]--imgarray_list
        retResults[1]--all results as a numpy array with 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
        retResults[2]--all results as a list; each element holds one frame's detections, each box formatted as
                       [x0, y0, x1, y1, conf, cls, iframe, trackId]; e.g. retResults[2][j][k] is the k-th box of frame j.
                       (output format changed 2023.08.03)
    '''
    det_cnt,windowsize = trackPar['det_cnt'], trackPar['windowsize']
    trackers_dic={}
    index_list = list(range(0, len(iframe_list), det_cnt))
    if len(index_list)>1 and index_list[-1]!= iframe_list[-1]:
        index_list.append(len(iframe_list) - 1)
    if len(imgarray_list)==1:  # a single image needs no tracking
        retResults = []
        p_result,timeOut = AI_det_track_N([[imgarray_list[0]], iframe_list[0]], modelList, postProcess, None)
        ## the next few lines only keep the output format consistent with the multi-frame case
        detArray = np.array(p_result[2])
        if len(p_result[2])==0: res=[]
        else:
            cnt = detArray.shape[0]; trackIds=np.zeros((cnt,1)); iframes = np.zeros((cnt,1)) + iframe_list[0]
            #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
            detArray = np.hstack((detArray[:,0:4], detArray[:,4:6], iframes, trackIds))  ## 2023.08.03: input format changed
            res = [[b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]] for b in detArray]
        retResults=[imgarray_list,detArray,res]
        #print('##line380:',retResults[2])
        return retResults,timeOut
    else:
        t0 = time.time()
        timeInfos_track=''
        for iframe_index, index_frame in enumerate(index_list):
            p_result,timeOut = AI_det_track_N([[imgarray_list[index_frame]], iframe_list[index_frame]], modelList, postProcess, sort_tracker)
            timeInfos_track='%s:%s'%(timeInfos_track,timeOut)
            for tracker in p_result[5]:
                trackers_dic[tracker.id]=deepcopy(tracker)
        t1 = time.time()
        track_det_result = np.empty((0,8))
        for trackId in trackers_dic.keys():
            tracker = trackers_dic[trackId]
            bbox_history = np.array(tracker.bbox_history).copy()
            if len(bbox_history)<2: continue
            ### convert (x0,y0,x1,y1) to (xc,yc,w,h)
            xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4])/2
            whs = bbox_history[:,2:4] - bbox_history[:,0:2]
            bbox_history[:,0:2] = xcs_ycs; bbox_history[:,2:4] = whs
            # Added 2023.11.17: force all boxes on a track chain to share the same class
            chainClsId = get_tracker_cls(bbox_history, scId=4, clsId=5)
            bbox_history[:,5] = chainClsId
            arrays_box = bbox_history[:,0:7].transpose(); frames=bbox_history[:,6]
            # frame_min--first frame of this batch of images; e.g. for batch [1,100] frame_min=1, for [101,200] frame_min=101
            # frames[0]--first frame in which this target appears, e.g. [1,11,21,31,41] gives frames[0]=1;
            #            frames[0] may lie before frame_min, i.e. one target can span several batches.
            ## to keep the interpolation range as small as possible, take the intersection of [frame_min,frame_max] and [frames[0],frames[-1]]
            #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##
            ## to obtain the complete trajectory, interpolate from the frame where the target first appears
            inter_frame_min=int(frames[0]); inter_frame_max=int(frames[-1])
            new_frames = np.linspace(inter_frame_min, inter_frame_max, inter_frame_max-inter_frame_min+1)
            f_linear = interpolate.interp1d(frames, arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
            move_cnt_use = (len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
            for im in range(4):
                interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im], move_cnt_use)
            cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
            interpolation_x0s = np.hstack((interpolation_x0s, trackIds))
            track_det_result = np.vstack((track_det_result, interpolation_x0s))
            #print('#####line116:',trackId,'----------',interpolation_x0s.shape,track_det_result.shape,bbox_history ,'-----')
        ## convert [xc,yc,w,h] back to [x0,y0,x1,y1]
        x0s = track_det_result[:,0] - track_det_result[:,2]/2; x1s = track_det_result[:,0] + track_det_result[:,2]/2
        y0s = track_det_result[:,1] - track_det_result[:,3]/2; y1s = track_det_result[:,1] + track_det_result[:,3]/2
        track_det_result[:,0] = x0s; track_det_result[:,1] = y0s
        track_det_result[:,2] = x1s; track_det_result[:,3] = y1s
        detResults=[]
        for iiframe in iframe_list:
            boxes_oneFrame = track_det_result[track_det_result[:,6]==iiframe]
            res = [[b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]] for b in boxes_oneFrame]
            #[ x0 ,y0 ,x1 ,y1 ,conf,cls,iframe,trackId ]
            #[iframe, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
            detResults.append(res)
        retResults=[imgarray_list,track_det_result,detResults]
        t2 = time.time()
        timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%(get_ms(t1,t0), get_ms(t2,t1), timeInfos_track)
        return retResults,timeInfos
def ocr_process(pars):
    img_patch,engine,context,converter,AlignCollate_normal,device = pars[0:6]
    time1 = time.time()
    img_tensor = AlignCollate_normal([Image.fromarray(img_patch,'L')])
    img_input = img_tensor.to('cuda:0')
    time2 = time.time()
    preds,trtstr = OcrTrtForward(engine,[img_input],context)
    time3 = time.time()
    batch_size = preds.size(0)
    preds_size = torch.IntTensor([preds.size(1)] * batch_size)
    ######## filter ignore_char, rebalance
    preds_prob = F.softmax(preds, dim=2)
    preds_prob = preds_prob.cpu().detach().numpy()
    pred_norm = preds_prob.sum(axis=2)
    preds_prob = preds_prob/np.expand_dims(pred_norm, axis=-1)
    preds_prob = torch.from_numpy(preds_prob).float().to(device)
    _, preds_index = preds_prob.max(2)
    preds_index = preds_index.view(-1)
    time4 = time.time()
    preds_str = converter.decode_greedy(preds_index.data.cpu().detach().numpy(), preds_size.data)
    time5 = time.time()
    info_str = ('pre-process:%.2f TRTforward:%.2f (%s) postProcess:%.2f decoder:%.2f, Total:%.2f , pred:%s'%(get_ms(time2,time1), get_ms(time3,time2), trtstr, get_ms(time4,time3), get_ms(time5,time4), get_ms(time5,time1), preds_str))
    return preds_str,info_str
def main():
    ## preset parameters
    device_='1'  ## device selection: 'cpu', '0' or '1'
    ## the parameters below are fixed for now
    Detweights = "weights/yolov5/class5/best_5classes.pt"
    seg_nclass = 2
    Segweights = "weights/BiSeNet/checkpoint.pth"
    conf_thres,iou_thres,classes = 0.25,0.45,5
    labelnames = "weights/yolov5/class5/labelnames.json"
    rainbows = [[0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255],[0,127,0],[0,0,127],[0,255,255]]
    allowedList=[0,1,2,3]
    ## load the models and prepare the label drawings
    device = select_device(device_)
    names = get_labelnames(labelnames)
    label_arraylist = get_label_arrays(names,rainbows,outfontsize=40,fontpath="conf/platech.ttf")
    half = device.type != 'cpu'  # half precision only supported on CUDA
    model = attempt_load(Detweights, map_location=device)  # load FP32 model
    if half: model.half()
    segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)
    ## image test
    #url='images/examples/20220624_响水河_12300_1621.jpg'
    impth = 'images/examples/'
    outpth = 'images/results/'
    folders = os.listdir(impth)
    for i in range(len(folders)):
        imgpath = os.path.join(impth, folders[i])
        im0s = [cv2.imread(imgpath)]
        time00 = time.time()
        # package the detection parameters as expected by AI_process
        objectPar = {'half':half,'device':device,'conf_thres':conf_thres,'iou_thres':iou_thres,
                     'allowedList':allowedList,'segRegionCnt':1,'trtFlag_det':False,'trtFlag_seg':False}
        p_result,timeOut = AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar=objectPar,segPar=None)
        time11 = time.time()
        image_array = p_result[1]
        cv2.imwrite(os.path.join(outpth,folders[i]), image_array)
        #print('----process:%s'%(folders[i]), (time.time() - time11) * 1000)
if __name__=="__main__":
    main()