import cv2, os, time, json
from models.experimental import attempt_load
from segutils.segmodel import SegModel, get_largest_contours
from segutils.trtUtils import segtrtEval, yolov5Trtforward
from segutils.trafficUtils import trafficPostProcessing, colour_code_segmentation, get_label_info
from utils.torch_utils import select_device
from utilsK.queRiver import get_labelnames, get_label_arrays, post_process_, img_pad, draw_painting_joint
from utils.datasets import letterbox
import numpy as np
import torch
import math


def get_postProcess_para(parfile):
    # Read post-processing parameters from a JSON config file.
    with open(parfile) as fp:
        par = json.load(fp)
    assert 'post_process' in par.keys(), 'parfile has no key: post_process'
    parPost = par['post_process']
    return parPost["conf_thres"], parPost["iou_thres"], parPost["classes"], parPost["rainbows"]


def AI_process(im0s, model, segmodel, names, label_arraylist, rainbows,
               objectPar={'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
                          'allowedList': [0, 1, 2, 3], 'slopeIndex': [5, 6, 7], 'segRegionCnt': 1,
                          'trtFlag_det': False, 'trtFlag_seg': False},
               font={'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None,
                     'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},
               segPar={'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
                       'numpy': False, 'RGB_convert_first': True}):
    # Inputs:
    #   im0s -- list of original images
    #   model -- detection model; segmodel -- segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
    #   im0s[0] -- original image; im0 -- image after AI processing; iframe -- frame index (not needed yet)
    #   det_xywh -- detection results, a list; each element describes one target as
    #               [float(cls_c), xc, yc, w, h, float(conf_c)]
    #               cls_c -- class id, e.g. 0, 1, 2, 3; xc, yc, w, h -- center coordinates, width and height;
    #               conf_c -- confidence score in [0, 1]
    #   strout -- timing statistics for each stage of the AI processing

    half, device, conf_thres, iou_thres, allowedList = \
        objectPar['half'], objectPar['device'], objectPar['conf_thres'], objectPar['iou_thres'], objectPar['allowedList']
    slopeIndex, trtFlag_det, trtFlag_seg, segRegionCnt = \
        objectPar['slopeIndex'], objectPar['trtFlag_det'], objectPar['trtFlag_seg'], objectPar['segRegionCnt']

    # Letterbox / pad
    time0 = time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640, 640, 3))
        img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
        padInfos = None

    # Stack
    img = np.stack(img, 0)

    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x640x640
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    time01 = time.time()
    img /= 255.0  # 0 - 255 to 0.0 - 1.0

    # Segmentation
    if segmodel:
        if trtFlag_seg:
            seg_pred, segstr = segtrtEval(segmodel, im0s[0], par=segPar)
        else:
            seg_pred, segstr = segmodel.eval(im0s[0])
        segFlag = True
    else:
        seg_pred = None; segFlag = False; segstr = 'Not implemented'

    # Detection
    time1 = time.time()
    if trtFlag_det:
        pred = yolov5Trtforward(model, img)
    else:
        pred = model(img, augment=False)[0]
    time2 = time.time()

    # Post-processing
    datas = [[''], img, im0s, None, pred, seg_pred, 10]
    ObjectPar = {'object_config': allowedList, 'slopeIndex': slopeIndex,
                 'segmodel': segFlag, 'segRegionCnt': segRegionCnt}
    p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10,
                                      ObjectPar=ObjectPar, font=font, padInfos=padInfos)
    time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s' % (
        (time01 - time0) * 1000, (time1 - time01) * 1000, (time2 - time1) * 1000, timeOut, segstr)

    return p_result, time_info


def AI_Seg_process(im0s, segmodel, digitWordFont, trtFlag_seg=True,
                   segPar={'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
                           'numpy': False, 'RGB_convert_first': True},
                   postPar={'label_csv':
                            './AIlib2/weights/conf/trafficAccident/class_dict.csv',
                            'speedRoadArea': 5100, 'vehicleArea': 100,
                            'speedRoadVehicleAngleMin': 15, 'speedRoadVehicleAngleMax': 75,
                            'vehicleLengthWidthThreshold': 4, 'vehicleSafeDistance': 7}):
    '''
    Inputs
        im0s -- list of original images
        segmodel -- segmentation model (None if not used)
        digitWordFont -- font, digit and related drawing parameters
        trtFlag_seg -- whether the segmentation model is in TensorRT format
        segPar -- segmentation model parameters
        postPar -- post-processing parameters
    Outputs
        seg_pred -- semantic segmentation result map (labels 0, 1, 2, ...)
        img_draw -- original image with bounding boxes drawn on it
        segstr -- text with timing information
        list2 -- target coordinates; each target is [cls, x0, y0, x1, y1, conf]
    '''
    time1 = time.time()
    H, W = im0s[0].shape[0:2]
    img_draw = im0s[0].copy()

    # Segmentation (TensorRT engine or plain model)
    if trtFlag_seg:
        seg_pred, segstr = segtrtEval(segmodel, im0s[0], par=segPar)
    else:
        seg_pred, segstr = segmodel.eval(im0s[0])
    time2 = time.time()

    # Traffic post-processing on the segmentation map
    label_info = get_label_info(postPar['label_csv'])
    postPar['CCS'] = colour_code_segmentation(seg_pred, label_info)
    postPar['sourceImageSize'] = im0s[0].shape[0:2]
    postPar['seg_pred_size'] = seg_pred.shape[0:2]
    list1, post_time_infos = trafficPostProcessing(postPar)

    list2 = []
    cls = 0
    label_arraylist = digitWordFont['label_arraylist']
    rainbows = digitWordFont['rainbows']
    for bpoints in list1:
        # print('###line104:', bpoints)
        bpoints = np.array(bpoints)
        x0 = np.min(bpoints[:, 0]); y0 = np.min(bpoints[:, 1])
        x1 = np.max(bpoints[:, 0]); y1 = np.max(bpoints[:, 1])
        conf = ((x0 + x1) / W + (y0 + y1) / H) / 4.0
        conf = 1.0 - math.fabs((conf - 0.5) / 0.5)
        xyxy = [x0, y0, x1, y1]
        xyxy = [int(x + 0.5) for x in xyxy]
        # [float(cls_c), *xywh, float(conf_c)]
        list2.append([cls, x0, y0, x1, y1, conf])
        img_draw = draw_painting_joint(xyxy, img_draw, label_arraylist[int(cls)], score=conf,
                                       color=rainbows[int(cls) % 20], font=digitWordFont)

    segstr = 'segInfer:%.2f %s ' % ((time2 - time1) * 1000.0, post_time_infos)
    return seg_pred, img_draw, segstr, list2


def AI_process_v2(im0s, model, segmodel, names, label_arraylist, rainbows, half=True, device='cuda:0',
                  conf_thres=0.25, iou_thres=0.45, allowedList=[0, 1, 2, 3],
                  font={'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None,
                        'waterLineColor': (0, 255, 255), 'waterLineWidth': 3}):
    # Inputs:
    #   im0s -- list of original images
    #   model -- detection model; segmodel -- segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
    #   im0s[0] -- original image; im0 -- image after AI processing; iframe -- frame index (not needed yet)
    #   det_xywh -- detection results, a list; each element describes one target as
    #               [float(cls_c), xc, yc, w, h, float(conf_c)]
    #               cls_c -- class id, e.g. 0, 1, 2, 3; xc, yc, w, h -- center coordinates, width and height;
    #               conf_c -- confidence score in [0, 1]
    #   strout -- timing statistics for each stage of the AI processing

    # Letterbox / pad
    time0 = time.time()
    # img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
    img, padInfos = img_pad(im0s[0], size=(640, 640, 3))
    img = [img]

    # Stack
    img = np.stack(img, 0)

    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x640x640
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    time01 = time.time()
    img /= 255.0  # 0 - 255 to 0.0 - 1.0

    # Segmentation
    if segmodel:
        seg_pred, segstr = segmodel.eval(im0s[0])
        segFlag = True
    else:
        seg_pred = None; segFlag = False; segstr = 'Not implemented'

    # Detection
    time1 = time.time()
    pred = model(img, augment=False)
    time2 = time.time()

    # Post-processing
    datas = [[''], img, im0s, None, pred, seg_pred, 10]
    p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10,
                                      object_config=allowedList, segmodel=segFlag, font=font, padInfos=padInfos)
    time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s' % (
        (time01 - time0) * 1000, (time1 - time01) * 1000, (time2 - time1) * 1000, timeOut, segstr)

    return p_result, time_info


def AI_process_forest(im0s, model, segmodel, names, label_arraylist, rainbows, half=True, device='cuda:0',
                      conf_thres=0.25, iou_thres=0.45, allowedList=[0, 1, 2, 3],
                      font={
                          'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None,
                          'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},
                      trtFlag_det=False):
    # Inputs:
    #   im0s -- list of original images
    #   model -- detection model; segmodel -- segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
    #   im0s[0] -- original image; im0 -- image after AI processing; iframe -- frame index (not needed yet)
    #   det_xywh -- detection results, a list; each element describes one target as
    #               [float(cls_c), xc, yc, w, h, float(conf_c)]
    #               cls_c -- class id, e.g. 0, 1, 2, 3; xc, yc, w, h -- center coordinates, width and height;
    #               conf_c -- confidence score in [0, 1]
    #   strout -- timing statistics for each stage of the AI processing

    # Letterbox / pad
    time0 = time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640, 640, 3))
        img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
        padInfos = None

    # Stack
    img = np.stack(img, 0)

    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x640x640
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0

    # Segmentation
    if segmodel:
        seg_pred, segstr = segmodel.eval(im0s[0])
        segFlag = True
    else:
        seg_pred = None; segFlag = False

    # Detection
    time1 = time.time()
    pred = yolov5Trtforward(model, img) if trtFlag_det else model(img, augment=False)[0]
    time2 = time.time()

    # Post-processing
    datas = [[''], img, im0s, None, pred, seg_pred, 10]
    ObjectPar = {'object_config': allowedList, 'slopeIndex': [], 'segmodel': segFlag, 'segRegionCnt': 0}
    p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10,
                                      ObjectPar=ObjectPar, font=font, padInfos=padInfos)
    # p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10,
    #                                   object_config=allowedList, segmodel=segFlag, font=font, padInfos=padInfos)
    time_info = 'letterbox:%.1f, infer:%.1f, ' % ((time1 - time0) * 1000, (time2 - time1) * 1000)

    return p_result, time_info + timeOut


def main():
    ## Preset parameters
    device_ = '1'  ## device selection: 'cpu', '0' or '1'

    ## The parameters below should not be changed for now
    Detweights = "weights/yolov5/class5/best_5classes.pt"
    seg_nclass = 2
    Segweights = "weights/BiSeNet/checkpoint.pth"
    conf_thres, iou_thres, classes = 0.25, 0.45, 5
    labelnames = "weights/yolov5/class5/labelnames.json"
    rainbows = [[0, 0, 255], [0, 255, 0], [255, 0, 0], [255, 0, 255], [255, 255, 0], [255, 129, 0], [255, 0, 127],
                [127, 255, 0], [0, 255, 127], [0, 127, 255], [127, 0, 255], [255, 127, 255], [255, 255, 127],
                [127, 255, 255], [0, 255, 255], [255, 127, 255], [127, 255, 255], [0, 127, 0], [0, 0, 127],
                [0, 255, 255]]
    allowedList = [0, 1, 2, 3]

    ## Load models and prepare the label drawing assets
    device = select_device(device_)
    names = get_labelnames(labelnames)
    label_arraylist = get_label_arrays(names, rainbows, outfontsize=40, fontpath="conf/platech.ttf")
    half = device.type != 'cpu'  # half precision only supported on CUDA
    model = attempt_load(Detweights, map_location=device)  # load FP32 model
    if half:
        model.half()
    segmodel = SegModel(nclass=seg_nclass, weights=Segweights, device=device)

    ## Image test
    # url='images/examples/20220624_响水河_12300_1621.jpg'
    impth = 'images/examples/'
    outpth = 'images/results/'
    folders = os.listdir(impth)
    for i in range(len(folders)):
        imgpath = os.path.join(impth, folders[i])
        im0s = [cv2.imread(imgpath)]
        time00 = time.time()
        # Pack the runtime options into the objectPar/font dicts expected by AI_process's signature.
        objectPar = {'half': half, 'device': device, 'conf_thres': conf_thres, 'iou_thres': iou_thres,
                     'allowedList': allowedList, 'slopeIndex': [5, 6, 7], 'segRegionCnt': 1,
                     'trtFlag_det': False, 'trtFlag_seg': False}
        font = {'line_thickness': None, 'fontSize': 1.0, 'boxLine_thickness': None,
                'waterLineColor': (0, 255, 255), 'waterLineWidth': 3}
        p_result, timeOut = AI_process(im0s, model, segmodel, names, label_arraylist, rainbows,
                                       objectPar=objectPar, font=font)
        time11 = time.time()
        image_array = p_result[1]
        cv2.imwrite(os.path.join(outpth, folders[i]), image_array)
        print('----process:%s' % (folders[i]), (time11 - time00) * 1000)


if __name__ == "__main__":
    main()
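

# ---------------------------------------------------------------------------
# Hedged usage sketch for AI_Seg_process (not called by main()).
# Assumptions not taken from the code above: the image/weight paths are
# placeholders, the default postPar (including its class_dict.csv path) is
# usable as-is, and digitWordFont only needs 'label_arraylist'/'rainbows'
# plus the same drawing keys as the 'font' dicts used elsewhere in this file.
def seg_demo(imgpath='images/examples/demo.jpg',             # hypothetical input image
             Segweights="weights/BiSeNet/checkpoint.pth"):   # placeholder checkpoint path
    device = select_device('0')
    names = get_labelnames("weights/yolov5/class5/labelnames.json")
    rainbows = [[0, 0, 255]] * 20  # placeholder palette: one BGR colour per label slot
    label_arraylist = get_label_arrays(names, rainbows, outfontsize=40, fontpath="conf/platech.ttf")
    digitWordFont = {'label_arraylist': label_arraylist, 'rainbows': rainbows,
                     'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None,
                     'waterLineColor': (0, 255, 255), 'waterLineWidth': 3}  # assumed key set
    segmodel = SegModel(nclass=2, weights=Segweights, device=device)
    im0s = [cv2.imread(imgpath)]
    # trtFlag_seg=False runs the plain (non-TensorRT) segmentation path.
    seg_pred, img_draw, segstr, boxes = AI_Seg_process(im0s, segmodel, digitWordFont, trtFlag_seg=False)
    print(segstr, boxes)
    cv2.imwrite('images/results/seg_demo.jpg', img_draw)
    return seg_pred, boxes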