- import cv2,os,time,json
- from models.experimental import attempt_load
- from segutils.segmodel import SegModel,get_largest_contours
- from segutils.trtUtils import segtrtEval,yolov5Trtforward,OcrTrtForward
- from segutils.trafficUtils import tracfficAccidentMixFunction
-
-
- from utils.torch_utils import select_device
- from utilsK.queRiver import get_labelnames,get_label_arrays,post_process_,img_pad,draw_painting_joint,detectDraw,getDetections,getDetectionsFromPreds
- from trackUtils.sort import moving_average_wang
-
- from utils.datasets import letterbox
- import numpy as np
- import torch
- import math
- from PIL import Image
- import torch.nn.functional as F
- from copy import deepcopy
- from scipy import interpolate
- import glob
-
- def get_images_videos(impth, imageFixs=['.jpg','.JPG','.PNG','.png'],videoFixs=['.MP4','.mp4','.avi']):
- imgpaths=[]   ###collect all image paths in the folder
- videopaths=[] ###collect all video paths in the folder
- if os.path.isdir(impth):
- for postfix in imageFixs:
- imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
- for postfix in videoFixs:
- videopaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
- else:
- postfix = os.path.splitext(impth)[-1]
- if postfix in imageFixs: imgpaths=[ impth ]
- if postfix in videoFixs: videopaths = [impth ]
-
- print('%s: test Images:%d , test videos:%d '%(impth, len(imgpaths), len(videopaths)))
- return imgpaths,videopaths
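-
- # A minimal usage sketch of get_images_videos (the example directory is hypothetical):
- #   imgpaths, videopaths = get_images_videos('images/examples/')
- #   print(len(imgpaths), len(videopaths))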
-
-
- def xywh2xyxy(box,iW=None,iH=None):
- xc,yc,w,h = box[0:4]
- x0 =max(0, xc-w/2.0)
- x1 =min(1, xc+w/2.0)
- y0=max(0, yc-h/2.0)
- y1=min(1,yc+h/2.0)
- if iW: x0,x1 = x0*iW,x1*iW
- if iH: y0,y1 = y0*iH,y1*iH
- return [x0,y0,x1,y1]
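-
- # Example (hypothetical numbers): a normalized box [xc,yc,w,h]=[0.5,0.5,0.2,0.4] on a 1920x1080 image:
- #   xywh2xyxy([0.5, 0.5, 0.2, 0.4], iW=1920, iH=1080) -> [768.0, 324.0, 1152.0, 756.0]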
-
-
- def get_ms(t2,t1):
- return (t2-t1)*1000.0
- def get_postProcess_para(parfile):
- with open(parfile) as fp:
- par = json.load(fp)
- assert 'post_process' in par.keys(), 'parfile has no key: post_process'
- parPost=par['post_process']
-
- return parPost["conf_thres"],parPost["iou_thres"],parPost["classes"],parPost["rainbows"]
- def get_postProcess_para_dic(parfile):
- with open(parfile) as fp:
- par = json.load(fp)
- parPost=par['post_process']
- return parPost
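-
- # A minimal sketch of the parameter file read above; only the keys actually accessed in
- # get_postProcess_para are shown, and the concrete values are assumptions:
- #   {"post_process": {"conf_thres": 0.25, "iou_thres": 0.45, "classes": 5,
- #                     "rainbows": [[0, 0, 255], [0, 255, 0]]}}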
- def score_filter_byClass(pdetections,score_para_2nd):
- ret=[]
- for det in pdetections:
- score,cls = det[4],det[5]
- if int(cls) in score_para_2nd.keys():
- score_th = score_para_2nd[int(cls)]
- elif str(int(cls)) in score_para_2nd.keys():
- score_th = score_para_2nd[str(int(cls))]
- else:
- score_th = 0.7
- if score > score_th:
- ret.append(det)
- return ret
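-
- # Example (hypothetical detections): with score_para_2nd={0: 0.5},
- # [xc, yc, w, h, 0.6, 0.0] is kept (0.6 > 0.5), while [xc, yc, w, h, 0.4, 2.0]
- # falls back to the 0.7 default threshold and is dropped.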
-
- def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar={ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False,'score_byClass':{x:0.1 for x in range(30)} }, font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,segPar={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True},mode='others',postPar=None):
-
- #Inputs:
- # im0s---list of original images
- # model---detection model; segmodel---segmentation model (None if unused)
- #
- #Output: a tuple of two elements (list, str): [im0s[0],im0,det_xywh,iframe], strout
- # in [im0s[0],im0,det_xywh,iframe]:
- # im0s[0]--original image; im0--image after AI processing; iframe--frame index (not used yet)
- # det_xywh--detection results, a list
- # each element describes one target as [ xc,yc,w,h, float(conf_c),float(cls_c) ], output format changed 2023.08.03
- # #cls_c--class id, e.g. 0,1,2,3; xc,yc,w,h--center coordinates plus width and height; conf_c--score in [0,1]
- # #strout---timing statistics for each stage of the AI processing
- # Letterbox
-
- half,device,conf_thres,iou_thres,allowedList = objectPar['half'],objectPar['device'],objectPar['conf_thres'],objectPar['iou_thres'],objectPar['allowedList']
-
- trtFlag_det,trtFlag_seg,segRegionCnt = objectPar['trtFlag_det'],objectPar['trtFlag_seg'],objectPar['segRegionCnt']
- if 'ovlap_thres_crossCategory' in objectPar.keys(): ovlap_thres = objectPar['ovlap_thres_crossCategory']
- else: ovlap_thres = None
-
- if 'score_byClass' in objectPar.keys(): score_byClass = objectPar['score_byClass']
- else: score_byClass = None
-
- time0=time.time()
- if trtFlag_det:
- img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
- else:
- #print('####line72:',im0s[0][10:12,10:12,2])
- img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
- #print('####line74:',img[0][10:12,10:12,2])
- # Stack
- img = np.stack(img, 0)
- # Convert
- img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
- img = np.ascontiguousarray(img)
-
-
- img = torch.from_numpy(img).to(device)
- img = img.half() if half else img.float() # uint8 to fp16/32
- img /= 255.0
- time01=time.time()
-
-
- if segmodel:
- seg_pred,segstr = segmodel.eval(im0s[0] )
- segFlag=True
- else:
- seg_pred = None;segFlag=False;segstr='Not implemented'
-
- time1=time.time()
- if trtFlag_det:
- pred = yolov5Trtforward(model,img)
- else:
- #print('####line96:',img[0,0,10:12,10:12])
- pred = model(img,augment=False)[0]
-
- time2=time.time()
-
-
- p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=ovlap_thres,padInfos=padInfos)
- if score_byClass:
- p_result[2] = score_filter_byClass(p_result[2],score_byClass)
- print('-'*10,p_result[2])
- #if mode=='highWay3.0':
- #if segmodel:
- if segPar and segPar['mixFunction']['function']:
-
- mixFunction = segPar['mixFunction']['function'];H,W = im0s[0].shape[0:2]
- parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2])
- parMix['imgSize'] = (W,H)
- #print(' -----------line110: ',p_result[2] ,'\n', seg_pred)
- p_result[2] , timeMixPost= mixFunction(p_result[2], seg_pred, pars=parMix )
- #print(' -----------line112: ',p_result[2] )
- p_result.append(seg_pred)
-
- else:
- timeMixPost=':0 ms'
- #print('#### line121: segstr:%s timeMixPost:%s timeOut:%s'%( segstr.strip(), timeMixPost,timeOut ))
- time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s ,timeMixPost:%s '%( (time01-time0)*1000, (time1-time01)*1000 ,(time2-time1)*1000,timeOut , segstr.strip(),timeMixPost )
- #if mode=='highWay3.0':
-
-
- return p_result,time_info
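-
- # A sketch of the segPar['mixFunction'] structure consumed above (the pars content is an
- # assumption; tracfficAccidentMixFunction imported at the top of this file is one candidate
- # for the (dets, seg_pred, pars=...) calling convention, but verify before use):
- #   segPar = {'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
- #             'numpy': False, 'RGB_convert_first': True,
- #             'mixFunction': {'function': tracfficAccidentMixFunction, 'pars': {}}}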
- def default_mix(predlist,par):
- return predlist[0],''
-
- def AI_process_N(im0s,modelList,postProcess):
-
- #Inputs:
- ## im0s---list of original images
- ## modelList--all models
- # postProcess--dict holding the post-processing function and its parameters
- #Outputs:
- ##ret[0]--detection results
- ##ret[1]--timing information
-
- #Each entry of modelList is a model class whose eval() returns that model's inference result
- modelRets=[ model.eval(im0s[0]) for model in modelList]
-
- timeInfos = [ x[1] for x in modelRets]
- timeInfos = ''.join(timeInfos)
-
- #postProcess['function']--the post-processing function; its input is the list of all model outputs
- mixFunction =postProcess['function']
- predsList = [ modelRet[0] for modelRet in modelRets ]
- H,W = im0s[0].shape[0:2]
- postProcess['pars']['imgSize'] = (W,H)
-
- #ret is the fused result after post-processing
- ret = mixFunction( predsList, postProcess['pars'])
-
- return ret[0],timeInfos+ret[1]
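-
- # A minimal sketch of the interface AI_process_N expects; the wrapper class and mix function
- # below are hypothetical placeholders for illustration, not models shipped with this repo.
- class _DummyDetModel:
-     def eval(self, image):
-         # return (result, timeInfo); here one fake box [x0, y0, x1, y1, conf, cls]
-         return [[10, 10, 100, 100, 0.9, 0.0]], 'det:0.0ms '
- def _dummy_mix(predsList, pars):
-     # fuse the per-model results; here simply pass the first model's boxes through
-     return predsList[0], ' mix:0.0ms'
- # usage: dets, timeInfos = AI_process_N([image], [_DummyDetModel()], {'function': _dummy_mix, 'pars': {}})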
-
- def getMaxScoreWords(detRets0):
- maxScore=-1;maxId=0
- for i,detRet in enumerate(detRets0):
- if detRet[4]>maxScore:
- maxId=i
- maxScore = detRet[4]
- return maxId
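-
- # Example (hypothetical boxes): getMaxScoreWords([[0,0,10,10,0.3,2.0],[5,5,20,20,0.8,1.0]]) -> 1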
-
-
- def AI_process_C(im0s,modelList,postProcess):
- #Why this function was added:
- ## the previous pipeline was
- ## image ---> model1 --> result1; image ---> model2 --> result2; [result1,result2] ---> post-processing
- ## this function's pipeline is
- ## image ---> model1 --> result1; [image,result1] ---> model2 --> result2; [result1,result2] ---> post-processing
- ## i.e. model2's input is determined by model1's output. For example, when model2 is an OCR model,
- ## the ship-name region detected by model1 is cropped out and fed into model2.
- ## previously model2 was always a segmentation model whose input was the original image, independent of model1's output.
- #Inputs:
- ## im0s---list of original images
- ## modelList--all models
- # postProcess--dict holding the post-processing function and its parameters
- #Outputs:
- ##ret[0]--detection results
- ##ret[1]--timing information
-
- #Each entry of modelList is a model class whose eval() returns that model's inference result
-
- t0=time.time()
- detRets0 = modelList[0].eval(im0s[0])
-
- #detRets0=[[12, 46, 1127, 1544, 0.2340087890625, 2.0], [1884, 1248, 2992, 1485, 0.64208984375, 1.0]]
- detRets0 = detRets0[0]
- parsIn=postProcess['pars']
-
- _detRets0_obj = list(filter(lambda x: x[5] in parsIn['objs'], detRets0 ))
- _detRets0_others = list(filter(lambda x: x[5] not in parsIn['objs'], detRets0 ))
- _detRets0 = []
- if postProcess['name']=='channel2':
- if len(_detRets0_obj)>0:
- maxId=getMaxScoreWords(_detRets0_obj)
- _detRets0 = _detRets0_obj[maxId:maxId+1]
- else: _detRets0 = detRets0
-
-
- t1=time.time()
- imagePatches = [ im0s[0][int(x[1]):int(x[3] ) ,int(x[0]):int(x[2])] for x in _detRets0 ]
- detRets1 = [modelList[1].eval(patch) for patch in imagePatches]
- print('###line240:',detRets1)
- if postProcess['name']=='crackMeasurement':
- detRets1 = [x[0]*255 for x in detRets1]
- t2=time.time()
- mixFunction =postProcess['function']
- crackInfos = [mixFunction(patchMask,par=parsIn) for patchMask in detRets1]
-
- rets = [ _detRets0[i]+ crackInfos[i] for i in range(len(imagePatches)) ]
- t3=time.time()
- outInfos='total:%.1f (det:%.1f %d segs:%.1f mixProcess:%.1f) '%( (t3-t0)*1000, (t1-t0)*1000, len(detRets1),(t2-t1)*1000, (t3-t2)*1000 )
- elif postProcess['name']=='channel2':
- H,W = im0s[0].shape[0:2];parsIn['imgSize'] = (W,H)
- mixFunction =postProcess['function']
- _detRets0_others = mixFunction([_detRets0_others], parsIn)
- ocrInfo='no ocr'
- if len(_detRets0_obj)>0:
- res_real = detRets1[0][0]
- res_real="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),res_real)))
-
- #detRets1[0][0]="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),detRets1[0][0])))
- _detRets0_obj[maxId].append(res_real )
- _detRets0_obj = [_detRets0_obj[maxId]]##only keep the ship-name detection that has an OCR result
- ocrInfo=detRets1[0][1]
- print( ' _detRets0_obj:{} _detRets0_others:{} '.format( _detRets0_obj, _detRets0_others ) )
- rets=_detRets0_obj+_detRets0_others
- t3=time.time()
- outInfos='total:%.1f ,where det:%.1f, ocr:%s'%( (t3-t0)*1000, (t1-t0)*1000, ocrInfo)
-
- #print('###line233:',detRets1,detRets0 )
-
- return rets,outInfos
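-
- # A sketch of the postProcess dict AI_process_C reads for the ship-name/OCR flow; the key names
- # follow the code above, while the concrete class id and function are assumptions:
- #   postProcess = {'name': 'channel2',
- #                  'function': some_mix_function,      # fuses the non-ship-name detections
- #                  'pars': {'objs': [ship_name_cls]}}  # class ids treated as ship-name boxes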
-
-
-
- def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device='cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,trtFlag_det=False,SecNms=None):
- #Inputs:
- # im0s---list of original images
- # model---detection model; segmodel---segmentation model (None if unused)
- #Output: a tuple of two elements (list, str): [im0s[0],im0,det_xywh,iframe], strout
- # in [im0s[0],im0,det_xywh,iframe]:
- # im0s[0]--original image; im0--image after AI processing; iframe--frame index (not used yet)
- # det_xywh--detection results, a list
- # each element describes one target as [ xc,yc,w,h, float(conf_c),float(cls_c)], #2023.08.03 output format changed
- # #cls_c--class id, e.g. 0,1,2,3; xc,yc,w,h--center coordinates plus width and height; conf_c--score in [0,1]
- # #strout---timing statistics for each stage of the AI processing
-
- # Letterbox
- time0=time.time()
- if trtFlag_det:
- img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
- else:
- img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
- #img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
- # Stack
- img = np.stack(img, 0)
- # Convert
- img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
- img = np.ascontiguousarray(img)
-
- img = torch.from_numpy(img).to(device)
- img = img.half() if half else img.float() # uint8 to fp16/32
-
- img /= 255.0 # 0 - 255 to 0.0 - 1.0
- if segmodel:
- seg_pred,segstr = segmodel.eval(im0s[0] )
- segFlag=True
- else:
- seg_pred = None;segFlag=False
- time1=time.time()
- pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]
-
-
- time2=time.time()
- datas = [[''], img, im0s, None,pred,seg_pred,10]
-
- ObjectPar={ 'object_config':allowedList, 'slopeIndex':[] ,'segmodel':segFlag,'segRegionCnt':0 }
- p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos,ovlap_thres=SecNms)
- #print('###line274:',p_result[2])
- #p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList,segmodel=segFlag,font=font,padInfos=padInfos)
- time_info = 'letterbox:%.1f, infer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )
- return p_result,time_info+timeOut
-
-
- def AI_det_track( im0s_in,modelPar,processPar,sort_tracker,segPar=None):
- im0s,iframe=im0s_in[0],im0s_in[1]
- model = modelPar['det_Model']
- segmodel = modelPar['seg_Model']
- half,device,conf_thres, iou_thres,trtFlag_det = processPar['half'], processPar['device'], processPar['conf_thres'], processPar['iou_thres'],processPar['trtFlag_det']
- if 'score_byClass' in processPar.keys(): score_byClass = processPar['score_byClass']
- else: score_byClass = None
-
- iou2nd = processPar['iou2nd']
- time0=time.time()
-
- if trtFlag_det:
- img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
- else:
- img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
- img = np.stack(img, 0)
- # Convert
- img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
- img = np.ascontiguousarray(img)
-
- img = torch.from_numpy(img).to(device)
- img = img.half() if half else img.float() # uint8 to fp16/32
-
- img /= 255.0 # 0 - 255 to 0.0 - 1.0
-
- seg_pred = None;segFlag=False
- time1=time.time()
- pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]
-
- time2=time.time()
-
- #p_result,timeOut = getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos)
- p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=iou2nd,padInfos=padInfos)
- if score_byClass:
- p_result[2] = score_filter_byClass(p_result[2],score_byClass)
- if segmodel:
- seg_pred,segstr = segmodel.eval(im0s[0] )
- segFlag=True
- else:
- seg_pred = None;segFlag=False;segstr='No segmodel'
-
-
- if segPar and segPar['mixFunction']['function']:
- mixFunction = segPar['mixFunction']['function']
-
- H,W = im0s[0].shape[0:2]
- parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2])
- parMix['imgSize'] = (W,H)
-
-
- p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=parMix )
- timeInfos_seg_post = 'segInfer:%s ,postMixProcess:%s'%( segstr, timeInfos_post )
- else:
- timeInfos_seg_post = ' '
- '''
- if segmodel:
- timeS1=time.time()
- #seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar) if segPar['trtFlag_seg'] else segmodel.eval(im0s[0] )
- seg_pred,segstr = segmodel.eval(im0s[0] )
- timeS2=time.time()
- mixFunction = segPar['mixFunction']['function']
-
- p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=segPar['mixFunction']['pars'] )
-
- timeInfos_seg_post = 'segInfer:%.1f ,postProcess:%s'%( (timeS2-timeS1)*1000, timeInfos_post )
-
- else:
- timeInfos_seg_post = ' '
- #print('######line341:',seg_pred.shape,np.max(seg_pred),np.min(seg_pred) , len(p_result[2]) )
- '''
- time_info = 'letterbox:%.1f, detinfer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )
-
- if sort_tracker:
- #here one can adjust how often the tracker is invoked
- #..................USE TRACK FUNCTION....................
- #pass an empty array to sort
- dets_to_sort = np.empty((0,7), dtype=np.float32)
-
- # NOTE: We send in detected object class too
- #for detclass,x1,y1,x2,y2,conf in p_result[2]:
- for x1,y1,x2,y2,conf, detclass in p_result[2]:
- #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
- dets_to_sort = np.vstack((dets_to_sort,
- np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) ))
-
- # Run SORT
- tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
- tracks =sort_tracker.getTrackers()
- p_result.append(tracked_dets) ###index=4
- p_result.append(tracks) ###index=5
-
- return p_result,time_info+timeOut+timeInfos_seg_post
- def AI_det_track_batch(imgarray_list, iframe_list ,modelPar,processPar,sort_tracker,trackPar,segPar=None):
- '''
- Inputs:
- imgarray_list -- list of images
- iframe_list -- list of frame indices
- modelPar -- model dict, modelPar={'det_Model':,'seg_Model':}
- processPar -- dict of detection parameters: 'half', 'device', 'conf_thres', 'iou_thres', 'trtFlag_det'
- sort_tracker -- initialized tracker object; required even for a single frame so the interface stays uniform
- trackPar -- tracking parameters, keys: det_cnt, windowsize
- segPar -- segmentation parameters, or None if unused
- Outputs: retResults, timeInfos
- retResults:list
- retResults[0] -- imgarray_list
- retResults[1] -- all results as a numpy array with 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
- retResults[2] -- all results as a list: one list per frame, each frame a list of boxes, each box formatted as [ x0 ,y0 ,x1 ,y1 ,conf, cls ,iframe ,trackId ]; e.g. retResults[2][j][k] is the k-th box of frame j. Output format changed 2023.08.03.
- '''
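- # A minimal consumption sketch (variable names are hypothetical):
- #   retResults, timeInfos = AI_det_track_batch(frames, list(range(len(frames))), modelPar, processPar, tracker, trackPar)
- #   for frame_boxes in retResults[2]:          # one list per frame
- #       for x0, y0, x1, y1, conf, cls, ifr, tid in frame_boxes:
- #           pass  # draw or serialize each tracked box here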
-
- det_cnt,windowsize = trackPar['det_cnt'] ,trackPar['windowsize']
- trackers_dic={}
- index_list = list(range( 0, len(iframe_list) ,det_cnt ));
- if len(index_list)>1 and index_list[-1]!= iframe_list[-1]:
- index_list.append( len(iframe_list) - 1 )
-
- if len(imgarray_list)==1: #a single image: no tracking needed
- retResults = []
- p_result,timeOut = AI_det_track( [ [imgarray_list[0]] ,iframe_list[0] ],modelPar,processPar,None,segPar )
- ##the next few lines only keep the output format consistent with the tracked case
- detArray = np.array(p_result[2])
- #print('##line371:',detArray)
- if len(p_result[2])==0:res=[]
- else:
- cnt = detArray.shape[0];trackIds=np.zeros((cnt,1));iframes = np.zeros((cnt,1)) + iframe_list[0]
-
- #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
- detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6] ,iframes, trackIds ) ) ##2023.08.03 input format changed
- res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ]
- retResults=[imgarray_list,detArray,res ]
- #print('##line380:',retResults[2])
- return retResults,timeOut
-
- else:
- t0 = time.time()
- timeInfos_track=''
- for iframe_index, index_frame in enumerate(index_list):
- p_result,timeOut = AI_det_track( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelPar,processPar,sort_tracker,segPar )
- timeInfos_track='%s:%s'%(timeInfos_track,timeOut)
-
- for tracker in p_result[5]:
- trackers_dic[tracker.id]=deepcopy(tracker)
- t1 = time.time()
-
- track_det_result = np.empty((0,8))
- for trackId in trackers_dic.keys():
- tracker = trackers_dic[trackId]
- bbox_history = np.array(tracker.bbox_history)
- if len(bbox_history)<2: continue
- ###convert (x0,y0,x1,y1) to (xc,yc,w,h)
- xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
- whs = bbox_history[:,2:4] - bbox_history[:,0:2]
- bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs;
-
- arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6]
- #frame_min--first frame of the current batch, e.g. batch [1,100] gives frame_min=1, [101,200] gives frame_min=101
- #frames[0]--first frame where this target appears, e.g. [1,11,21,31,41] gives frames[0]=1; frames[0] may lie before frame_min, i.e. one track can span several batches
-
- ##to restrict interpolation to the current batch, intersect [frame_min, frame_max] with [frames[0], frames[-1]]
- #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##
-
- ##to recover the full trajectory, interpolate from the first frame where the target appears
- inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1])
- new_frames= np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 )
- f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
- move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
- for im in range(4):
- interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im],move_cnt_use )
-
- cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
- interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
- track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
- #print('#####line116:',trackId,frame_min,frame_max,'----------',interpolation_x0s.shape,track_det_result.shape ,'-----')
-
- ##convert [xc,yc,w,h] back to [x0,y0,x1,y1]
- x0s = track_det_result[:,0] - track_det_result[:,2]/2 ; x1s = track_det_result[:,0] + track_det_result[:,2]/2
- y0s = track_det_result[:,1] - track_det_result[:,3]/2 ; y1s = track_det_result[:,1] + track_det_result[:,3]/2
- track_det_result[:,0] = x0s; track_det_result[:,1] = y0s;
- track_det_result[:,2] = x1s; track_det_result[:,3] = y1s;
- detResults=[]
- for iiframe in iframe_list:
- boxes_oneFrame = track_det_result[ track_det_result[:,6]==iiframe ]
- res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in boxes_oneFrame ]
- #[ x0 ,y0 ,x1 ,y1 ,conf,cls,iframe,trackId ]
- #[iframe, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
- detResults.append( res )
-
-
- retResults=[imgarray_list,track_det_result,detResults ]
- t2 = time.time()
- timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%(get_ms(t1,t0),get_ms(t2,t1), timeInfos_track )
- return retResults,timeInfos
- def AI_det_track_N( im0s_in,modelList,postProcess,sort_tracker):
- im0s,iframe=im0s_in[0],im0s_in[1]
- dets = AI_process_N(im0s,modelList,postProcess)
- p_result=[[],[],dets[0],[] ]
- if sort_tracker:
- #here one can adjust how often the tracker is invoked
- #..................USE TRACK FUNCTION....................
- #pass an empty array to sort
- dets_to_sort = np.empty((0,7), dtype=np.float32)
-
- # NOTE: We send in detected object class too
- #for detclass,x1,y1,x2,y2,conf in p_result[2]:
- for x1,y1,x2,y2,conf, detclass in p_result[2]:
- #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
- dets_to_sort = np.vstack((dets_to_sort,
- np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) ))
-
- # Run SORT
- tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
- tracks =sort_tracker.getTrackers()
- p_result.append(tracked_dets) ###index=4
- p_result.append(tracks) ###index=5
-
- return p_result,dets[1]
- def get_tracker_cls(boxes,scId=4,clsId=5):
- #Normally a track carries a single class, but detection errors can put several classes on one track.
- #The track's class is therefore taken as the class whose boxes have the largest summed confidence.
- #Input boxes--the bbox_history kept during tracking, [[xc,yc,width,height,score,class,iframe],[...],[...]]
- ## scId=4: column index of the score; clsId=5: column index of the class
- #Output:
- ##the class id assigned to this track
- ids = list(set(boxes[:,clsId].tolist()))
- scores = [np.sum( boxes[:,scId] [ boxes[:,clsId]==x ] ) for x in ids]
- maxScoreId = scores.index(np.max(scores))
- return int(ids[maxScoreId])
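-
- # Example (hypothetical history): the two class-1 boxes sum to 0.3+0.4=0.7, which outweighs the
- # single class-2 box at 0.6, so the whole track is labelled class 1:
- #   get_tracker_cls(np.array([[0,0,1,1,0.3,1,0],[0,0,1,1,0.4,1,1],[0,0,1,1,0.6,2,2]])) -> 1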
-
- def AI_det_track_batch_N(imgarray_list, iframe_list ,modelList,postProcess,sort_tracker,trackPar):
- '''
- Inputs:
- imgarray_list -- list of images
- iframe_list -- list of frame indices
- modelList -- all models
- postProcess -- dict holding the post-processing function and its parameters
- sort_tracker -- initialized tracker object; required even for a single frame so the interface stays uniform
- trackPar -- tracking parameters, keys: det_cnt, windowsize
- Outputs: retResults, timeInfos
- retResults:list
- retResults[0] -- imgarray_list
- retResults[1] -- all results as a numpy array with 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
- retResults[2] -- all results as a list: one list per frame, each frame a list of boxes, each box formatted as [ x0 ,y0 ,x1 ,y1 ,conf, cls ,iframe ,trackId ]; e.g. retResults[2][j][k] is the k-th box of frame j. Output format changed 2023.08.03.
- '''
-
- det_cnt,windowsize = trackPar['det_cnt'] ,trackPar['windowsize']
- trackers_dic={}
- index_list = list(range( 0, len(iframe_list) ,det_cnt ));
- if len(index_list)>1 and index_list[-1]!= iframe_list[-1]:
- index_list.append( len(iframe_list) - 1 )
-
- if len(imgarray_list)==1: #a single image: no tracking needed
- retResults = []
- p_result,timeOut = AI_det_track_N( [ [imgarray_list[0]] ,iframe_list[0] ],modelList,postProcess,None )
- ##the next few lines only keep the output format consistent with the tracked case
- detArray = np.array(p_result[2])
- if len(p_result[2])==0:res=[]
- else:
- cnt = detArray.shape[0];trackIds=np.zeros((cnt,1));iframes = np.zeros((cnt,1)) + iframe_list[0]
-
- #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
- detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6] ,iframes, trackIds ) ) ##2023.08.03 input format changed
- res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ]
- retResults=[imgarray_list,detArray,res ]
- #print('##line380:',retResults[2])
- return retResults,timeOut
-
- else:
- t0 = time.time()
- timeInfos_track=''
- for iframe_index, index_frame in enumerate(index_list):
- p_result,timeOut = AI_det_track_N( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelList,postProcess,sort_tracker )
- timeInfos_track='%s:%s'%(timeInfos_track,timeOut)
-
- for tracker in p_result[5]:
- trackers_dic[tracker.id]=deepcopy(tracker)
- t1 = time.time()
-
- track_det_result = np.empty((0,8))
- for trackId in trackers_dic.keys():
- tracker = trackers_dic[trackId]
- bbox_history = np.array(tracker.bbox_history).copy()
- if len(bbox_history)<2: continue
- ###convert (x0,y0,x1,y1) to (xc,yc,w,h)
- xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
- whs = bbox_history[:,2:4] - bbox_history[:,0:2]
- bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs;
-
- #Added 2023.11.17: force every box on a track to share the same class
- chainClsId = get_tracker_cls(bbox_history,scId=4,clsId=5)
- bbox_history[:,5] = chainClsId
-
- arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6]
- #frame_min--first frame of the current batch, e.g. batch [1,100] gives frame_min=1, [101,200] gives frame_min=101
- #frames[0]--first frame where this target appears, e.g. [1,11,21,31,41] gives frames[0]=1; frames[0] may lie before frame_min, i.e. one track can span several batches
-
- ##to restrict interpolation to the current batch, intersect [frame_min, frame_max] with [frames[0], frames[-1]]
- #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##
-
- ##to recover the full trajectory, interpolate from the first frame where the target appears
- inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1])
- new_frames= np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 )
- f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
- move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
- for im in range(4):
- interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im],move_cnt_use )
-
- cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
- interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
- track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
- #print('#####line116:',trackId,'----------',interpolation_x0s.shape,track_det_result.shape,bbox_history ,'-----')
-
- ##convert [xc,yc,w,h] back to [x0,y0,x1,y1]
- x0s = track_det_result[:,0] - track_det_result[:,2]/2 ; x1s = track_det_result[:,0] + track_det_result[:,2]/2
- y0s = track_det_result[:,1] - track_det_result[:,3]/2 ; y1s = track_det_result[:,1] + track_det_result[:,3]/2
- track_det_result[:,0] = x0s; track_det_result[:,1] = y0s;
- track_det_result[:,2] = x1s; track_det_result[:,3] = y1s;
- detResults=[]
- for iiframe in iframe_list:
- boxes_oneFrame = track_det_result[ track_det_result[:,6]==iiframe ]
- res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in boxes_oneFrame ]
- #[ x0 ,y0 ,x1 ,y1 ,conf,cls,iframe,trackId ]
- #[iframe, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
- detResults.append( res )
-
-
- retResults=[imgarray_list,track_det_result,detResults ]
- t2 = time.time()
- timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%(get_ms(t1,t0),get_ms(t2,t1), timeInfos_track )
- return retResults,timeInfos
-
-
-
- def ocr_process(pars):
-
- img_patch,engine,context,converter,AlignCollate_normal,device=pars[0:6]
- time1 = time.time()
- img_tensor = AlignCollate_normal([ Image.fromarray(img_patch,'L') ])
- img_input = img_tensor.to('cuda:0')
- time2 = time.time()
-
- preds,trtstr=OcrTrtForward(engine,[img_input],context)
- time3 = time.time()
-
- batch_size = preds.size(0)
- preds_size = torch.IntTensor([preds.size(1)] * batch_size)
-
- ######## filter ignore_char, rebalance
- preds_prob = F.softmax(preds, dim=2)
- preds_prob = preds_prob.cpu().detach().numpy()
- pred_norm = preds_prob.sum(axis=2)
- preds_prob = preds_prob/np.expand_dims(pred_norm, axis=-1)
- preds_prob = torch.from_numpy(preds_prob).float().to(device)
- _, preds_index = preds_prob.max(2)
- preds_index = preds_index.view(-1)
- time4 = time.time()
- preds_str = converter.decode_greedy(preds_index.data.cpu().detach().numpy(), preds_size.data)
- time5 = time.time()
-
- info_str= ('pre-process:%.2f TRTforward:%.2f (%s) postProcess:%.2f decoder:%.2f, Total:%.2f , pred:%s'%(get_ms(time2,time1 ),get_ms(time3,time2 ),trtstr, get_ms(time4,time3 ), get_ms(time5,time4 ), get_ms(time5,time1 ), preds_str ) )
- return preds_str,info_str
- def main():
- ##preset parameters
- device_='1' ##device to use: 'cpu', '0' or '1'
-
- ##the following parameters are fixed for now
- Detweights = "weights/yolov5/class5/best_5classes.pt"
- seg_nclass = 2
- Segweights = "weights/BiSeNet/checkpoint.pth"
- conf_thres,iou_thres,classes= 0.25,0.45,5
- labelnames = "weights/yolov5/class5/labelnames.json"
- rainbows = [ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
- allowedList=[0,1,2,3]
-
-
- ##load the models and prepare the label-drawing arrays
- device = select_device(device_)
- names=get_labelnames(labelnames)
- label_arraylist = get_label_arrays(names,rainbows,outfontsize=40,fontpath="conf/platech.ttf")
- half = device.type != 'cpu' # half precision only supported on CUDA
- model = attempt_load(Detweights, map_location=device) # load FP32 model
- if half: model.half()
- segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)
-
-
- ##image test
- #url='images/examples/20220624_响水河_12300_1621.jpg'
- impth = 'images/examples/'
- outpth = 'images/results/'
- folders = os.listdir(impth)
- for i in range(len(folders)):
- imgpath = os.path.join(impth, folders[i])
- im0s=[cv2.imread(imgpath)]
- time00 = time.time()
- objectPar={'half':half,'device':device,'conf_thres':conf_thres,'iou_thres':iou_thres,'allowedList':allowedList,'segRegionCnt':1,'trtFlag_det':False,'trtFlag_seg':False}
- p_result,timeOut = AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar=objectPar,segPar=None)
- time11 = time.time()
- image_array = p_result[1]
- cv2.imwrite( os.path.join( outpth,folders[i] ) ,image_array )
- #print('----process:%s'%(folders[i]), (time.time() - time11) * 1000)
-
-
-
-
-
- if __name__=="__main__":
- main()