import cv2, os, time, json
from models.experimental import attempt_load
from segutils.segmodel import SegModel, get_largest_contours
from segutils.trtUtils import segtrtEval, yolov5Trtforward
from segutils.trafficUtils import trafficPostProcessing, colour_code_segmentation, get_label_info
from utils.torch_utils import select_device
from utilsK.queRiver import get_labelnames, get_label_arrays, post_process_, img_pad, draw_painting_joint
from utils.datasets import letterbox
import numpy as np
import torch
import math

def get_postProcess_para(parfile):
    # Read the post-processing parameters (thresholds, class count and colours) from a JSON file;
    # an example parameter file is sketched after this function.
    with open(parfile) as fp:
        par = json.load(fp)
    assert 'post_process' in par.keys(), 'parfile has no key word: post_process'
    parPost = par['post_process']
    return parPost["conf_thres"], parPost["iou_thres"], parPost["classes"], parPost["rainbows"]
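
# A minimal sketch of the parameter file expected by get_postProcess_para, inferred from the keys
# read above; the file name and the concrete values are illustrative only.
#
#   {
#     "post_process": {
#       "conf_thres": 0.25,
#       "iou_thres": 0.45,
#       "classes": 5,
#       "rainbows": [[0, 0, 255], [0, 255, 0], [255, 0, 0]]
#     }
#   }
#
#   conf_thres, iou_thres, classes, rainbows = get_postProcess_para('conf/para.json')
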
def AI_process(im0s, model, segmodel, names, label_arraylist, rainbows,
               objectPar={'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
                          'allowedList': [0, 1, 2, 3], 'slopeIndex': [5, 6, 7], 'segRegionCnt': 1,
                          'trtFlag_det': False, 'trtFlag_seg': False},
               font={'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None,
                     'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},
               segPar={'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
                       'numpy': False, 'RGB_convert_first': True}):
    # Inputs:
    #   im0s--list of original images
    #   model--detection model, segmodel--segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
    #   In [im0s[0], im0, det_xywh, iframe]:
    #     im0s[0]--original image, im0--image after AI processing, iframe--frame number (not needed for now)
    #     det_xywh--detection results, a list; each element describes one target,
    #       e.g. [float(cls_c), xc, yc, w, h, float(conf_c)]
    #       cls_c--class id, e.g. 0, 1, 2, 3; xc, yc, w, h--centre coordinates, width and height;
    #       conf_c--score in the range 0-1
    #   strout--timing statistics for each stage of the AI processing
    # A usage sketch is given after this function.
    # Letterbox
    half, device, conf_thres, iou_thres, allowedList = objectPar['half'], objectPar['device'], objectPar['conf_thres'], objectPar['iou_thres'], objectPar['allowedList']
    slopeIndex, trtFlag_det, trtFlag_seg, segRegionCnt = objectPar['slopeIndex'], objectPar['trtFlag_det'], objectPar['trtFlag_seg'], objectPar['segRegionCnt']
    time0 = time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640, 640, 3))
        img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
        padInfos = None
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x640x640
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    time01 = time.time()
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if segmodel:
        if trtFlag_seg:
            seg_pred, segstr = segtrtEval(segmodel, im0s[0], par=segPar)
        else:
            seg_pred, segstr = segmodel.eval(im0s[0])
        segFlag = True
    else:
        seg_pred = None
        segFlag = False
        segstr = 'Not implemented'
    time1 = time.time()
    if trtFlag_det:
        pred = yolov5Trtforward(model, img)
    else:
        pred = model(img, augment=False)[0]
    time2 = time.time()
    datas = [[''], img, im0s, None, pred, seg_pred, 10]
    ObjectPar = {'object_config': allowedList, 'slopeIndex': slopeIndex, 'segmodel': segFlag, 'segRegionCnt': segRegionCnt}
    p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10,
                                      ObjectPar=ObjectPar, font=font, padInfos=padInfos)
    time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s' % ((time01 - time0) * 1000, (time1 - time01) * 1000, (time2 - time1) * 1000, timeOut, segstr)
    return p_result, time_info
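
# Minimal usage sketch for AI_process, assuming model, segmodel, names, label_arraylist and
# rainbows are prepared as in main() below; the image path is illustrative only.
#
#   objectPar = {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
#                'allowedList': [0, 1, 2, 3], 'slopeIndex': [5, 6, 7], 'segRegionCnt': 1,
#                'trtFlag_det': False, 'trtFlag_seg': False}
#   im0s = [cv2.imread('images/examples/demo.jpg')]
#   p_result, time_info = AI_process(im0s, model, segmodel, names, label_arraylist, rainbows,
#                                    objectPar=objectPar)
#   annotated = p_result[1]   # im0: image with the detections drawn on it
#   det_xywh = p_result[2]    # list of [cls, xc, yc, w, h, conf] entries
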
def AI_Seg_process(im0s, segmodel, digitWordFont, trtFlag_seg=True,
                   segPar={'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
                           'numpy': False, 'RGB_convert_first': True},
                   postPar={'label_csv': './AIlib2/weights/conf/trafficAccident/class_dict.csv',
                            'speedRoadArea': 5100, 'vehicleArea': 100,
                            'speedRoadVehicleAngleMin': 15, 'speedRoadVehicleAngleMax': 75,
                            'vehicleLengthWidthThreshold': 4, 'vehicleSafeDistance': 7}):
    '''
    Inputs
        im0s--list of original images
        segmodel--segmentation model (None if not used)
        digitWordFont--font, digits and other display parameters
        trtFlag_seg--whether the segmentation model is in TRT format
        segPar--parameters of the segmentation model
        postPar--post-processing parameters
    Outputs
        seg_pred--semantic segmentation result map (class ids 0, 1, 2, ...)
        img_draw--original image with rectangles drawn on it
        segstr--text with timing information
        list2--returned target boxes, one entry per target in the form [cls, x0, y0, x1, y1, conf]
    A usage sketch is given after this function.
    '''
    time1 = time.time()
    H, W = im0s[0].shape[0:2]
    img_draw = im0s[0].copy()
    if trtFlag_seg:
        seg_pred, segstr = segtrtEval(segmodel, im0s[0], par=segPar)
    else:
        seg_pred, segstr = segmodel.eval(im0s[0])
    time2 = time.time()
    label_info = get_label_info(postPar['label_csv'])
    postPar['CCS'] = colour_code_segmentation(seg_pred, label_info)
    postPar['sourceImageSize'] = im0s[0].shape[0:2]
    postPar['seg_pred_size'] = seg_pred.shape[0:2]
    list1, post_time_infos = trafficPostProcessing(postPar)
    list2 = []
    cls = 0
    label_arraylist = digitWordFont['label_arraylist']
    rainbows = digitWordFont['rainbows']
    for bpoints in list1:
        bpoints = np.array(bpoints)
        x0 = np.min(bpoints[:, 0])
        y0 = np.min(bpoints[:, 1])
        x1 = np.max(bpoints[:, 0])
        y1 = np.max(bpoints[:, 1])
        # Pseudo confidence from the box position: 1.0 at the image centre, falling towards the edges.
        conf = ((x0 + x1) / W + (y0 + y1) / H) / 4.0
        conf = 1.0 - math.fabs((conf - 0.5) / 0.5)
        xyxy = [x0, y0, x1, y1]
        xyxy = [int(x + 0.5) for x in xyxy]
        # [float(cls_c), *xywh, float(conf_c)]
        list2.append([cls, x0, y0, x1, y1, conf])
        img_draw = draw_painting_joint(xyxy, img_draw, label_arraylist[int(cls)], score=conf,
                                       color=rainbows[int(cls) % 20], font=digitWordFont)
    segstr = 'segInfer:%.2f %s ' % ((time2 - time1) * 1000.0, post_time_infos)
    return seg_pred, img_draw, segstr, list2
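
# Minimal usage sketch for AI_Seg_process, assuming a BiSeNet-style SegModel as in main() below and
# the default postPar; digitWordFont is assumed to carry 'label_arraylist', 'rainbows' and the
# drawing options used by draw_painting_joint. Paths and values are illustrative only.
#
#   segmodel = SegModel(nclass=2, weights='weights/BiSeNet/checkpoint.pth', device=device)
#   digitWordFont = {'label_arraylist': label_arraylist, 'rainbows': rainbows,
#                    'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None}
#   seg_pred, img_draw, segstr, boxes = AI_Seg_process([cv2.imread('images/examples/demo.jpg')],
#                                                      segmodel, digitWordFont, trtFlag_seg=False)
#   cv2.imwrite('images/results/demo_seg.jpg', img_draw)
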
def AI_process_v2(im0s, model, segmodel, names, label_arraylist, rainbows, half=True, device='cuda:0',
                  conf_thres=0.25, iou_thres=0.45, allowedList=[0, 1, 2, 3],
                  font={'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None,
                        'waterLineColor': (0, 255, 255), 'waterLineWidth': 3}):
    # Inputs:
    #   im0s--list of original images
    #   model--detection model, segmodel--segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
    #   In [im0s[0], im0, det_xywh, iframe]:
    #     im0s[0]--original image, im0--image after AI processing, iframe--frame number (not needed for now)
    #     det_xywh--detection results, a list; each element describes one target,
    #       e.g. [float(cls_c), xc, yc, w, h, float(conf_c)]
    #       cls_c--class id, e.g. 0, 1, 2, 3; xc, yc, w, h--centre coordinates, width and height;
    #       conf_c--score in the range 0-1
    #   strout--timing statistics for each stage of the AI processing
    # Letterbox
    time0 = time.time()
    #img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
    img, padInfos = img_pad(im0s[0], size=(640, 640, 3))
    img = [img]
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x640x640
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    time01 = time.time()
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if segmodel:
        seg_pred, segstr = segmodel.eval(im0s[0])
        segFlag = True
    else:
        seg_pred = None
        segFlag = False
        segstr = 'Not implemented'
    time1 = time.time()
    pred = model(img, augment=False)
    time2 = time.time()
    datas = [[''], img, im0s, None, pred, seg_pred, 10]
    p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10,
                                      object_config=allowedList, segmodel=segFlag, font=font, padInfos=padInfos)
    time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s' % ((time01 - time0) * 1000, (time1 - time01) * 1000, (time2 - time1) * 1000, timeOut, segstr)
    return p_result, time_info

def AI_process_forest(im0s, model, segmodel, names, label_arraylist, rainbows, half=True, device='cuda:0',
                      conf_thres=0.25, iou_thres=0.45, allowedList=[0, 1, 2, 3],
                      font={'line_thickness': None, 'fontSize': None, 'boxLine_thickness': None,
                            'waterLineColor': (0, 255, 255), 'waterLineWidth': 3},
                      trtFlag_det=False):
    # Inputs:
    #   im0s--list of original images
    #   model--detection model, segmodel--segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
    #   In [im0s[0], im0, det_xywh, iframe]:
    #     im0s[0]--original image, im0--image after AI processing, iframe--frame number (not needed for now)
    #     det_xywh--detection results, a list; each element describes one target,
    #       e.g. [float(cls_c), xc, yc, w, h, float(conf_c)]
    #       cls_c--class id, e.g. 0, 1, 2, 3; xc, yc, w, h--centre coordinates, width and height;
    #       conf_c--score in the range 0-1
    #   strout--timing statistics for each stage of the AI processing
    # A usage sketch is given after this function.
    # Letterbox
    time0 = time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640, 640, 3))
        img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
        padInfos = None
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x640x640
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if segmodel:
        seg_pred, segstr = segmodel.eval(im0s[0])
        segFlag = True
    else:
        seg_pred = None
        segFlag = False
    time1 = time.time()
    pred = yolov5Trtforward(model, img) if trtFlag_det else model(img, augment=False)[0]
    time2 = time.time()
    datas = [[''], img, im0s, None, pred, seg_pred, 10]
    ObjectPar = {'object_config': allowedList, 'slopeIndex': [], 'segmodel': segFlag, 'segRegionCnt': 0}
    p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10,
                                      ObjectPar=ObjectPar, font=font, padInfos=padInfos)
    time_info = 'letterbox:%.1f, infer:%.1f, ' % ((time1 - time0) * 1000, (time2 - time1) * 1000)
    return p_result, time_info + timeOut
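
# Minimal usage sketch for AI_process_forest, assuming the detection model, names, label_arraylist,
# rainbows, half and device are prepared as in main() below and no segmentation model is used.
# Paths and thresholds are illustrative only.
#
#   p_result, time_info = AI_process_forest([cv2.imread('images/examples/demo.jpg')], model, None,
#                                           names, label_arraylist, rainbows, half=half, device=device,
#                                           conf_thres=0.25, iou_thres=0.45, allowedList=[0, 1, 2, 3],
#                                           trtFlag_det=False)
#   cv2.imwrite('images/results/demo_forest.jpg', p_result[1])
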
def main():
    ## Preset parameters
    device_ = '1'  ## select the compute device; options: 'cpu', '0', '1'
    ## The following parameters must not be changed for now
    Detweights = "weights/yolov5/class5/best_5classes.pt"
    seg_nclass = 2
    Segweights = "weights/BiSeNet/checkpoint.pth"
    conf_thres, iou_thres, classes = 0.25, 0.45, 5
    labelnames = "weights/yolov5/class5/labelnames.json"
    rainbows = [[0, 0, 255], [0, 255, 0], [255, 0, 0], [255, 0, 255], [255, 255, 0], [255, 129, 0],
                [255, 0, 127], [127, 255, 0], [0, 255, 127], [0, 127, 255], [127, 0, 255], [255, 127, 255],
                [255, 255, 127], [127, 255, 255], [0, 255, 255], [255, 127, 255], [127, 255, 255],
                [0, 127, 0], [0, 0, 127], [0, 255, 255]]
    allowedList = [0, 1, 2, 3]
    ## Load the models and prepare the label images used for drawing
    device = select_device(device_)
    names = get_labelnames(labelnames)
    label_arraylist = get_label_arrays(names, rainbows, outfontsize=40, fontpath="conf/platech.ttf")
    half = device.type != 'cpu'  # half precision only supported on CUDA
    model = attempt_load(Detweights, map_location=device)  # load FP32 model
    if half: model.half()
    segmodel = SegModel(nclass=seg_nclass, weights=Segweights, device=device)
    ## Image test
    #url='images/examples/20220624_响水河_12300_1621.jpg'
    impth = 'images/examples/'
    outpth = 'images/results/'
    folders = os.listdir(impth)
    for i in range(len(folders)):
        imgpath = os.path.join(impth, folders[i])
        im0s = [cv2.imread(imgpath)]
        time00 = time.time()
        # Pack the detection options into objectPar, matching the current AI_process signature.
        objectPar = {'half': half, 'device': device, 'conf_thres': conf_thres, 'iou_thres': iou_thres,
                     'allowedList': allowedList, 'slopeIndex': [5, 6, 7], 'segRegionCnt': 1,
                     'trtFlag_det': False, 'trtFlag_seg': False}
        p_result, timeOut = AI_process(im0s, model, segmodel, names, label_arraylist, rainbows,
                                       objectPar=objectPar)
        time11 = time.time()
        image_array = p_result[1]
        cv2.imwrite(os.path.join(outpth, folders[i]), image_array)
        print('----process:%s' % (folders[i]), (time.time() - time11) * 1000)


if __name__ == "__main__":
    main()