import torch
import argparse
import sys, os
from torchvision import transforms
import cv2, glob
import numpy as np
import matplotlib.pyplot as plt
import time
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
import tensorrt as trt
#import pycuda.driver as cuda
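
# NOTE: SegModel_BiSeNet, SegModel_STDC, segPreProcess_image and segtrtEval are
# assumed to come from this project's own segmentation modules; they are not
# defined in this file.
def get_ms(t_end, t_start):
    # elapsed time in milliseconds; helper reconstructed from how the timing
    # prints below use it (an assumption, not part of the original file)
    return (t_end - t_start) * 1000.0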
def get_largest_contours(contours):
    # return the index of the contour with the largest area
    areas = [cv2.contourArea(x) for x in contours]
    max_area = max(areas)
    max_id = areas.index(max_area)
    return max_id
def infer_usage():
    image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG'
    nclass = 2
    #weights = '../weights/segmentation/BiSeNet/checkpoint.pth'
    #weights = '../weights/BiSeNet/checkpoint.pth'
    #segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights)
    weights = '../weights/BiSeNet/checkpoint_640X360_epo33.pth'
    segmodel = SegModel_BiSeNet(nclass=nclass, weights=weights, modelsize=(640, 360))
    image_urls = glob.glob('../../../../data/无人机起飞测试图像/*')
    out_dir = 'results/'
    os.makedirs(out_dir, exist_ok=True)
    for im, image_url in enumerate(image_urls[0:]):
        #image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG'
        image_array0 = cv2.imread(image_url)
        H, W, C = image_array0.shape
        time_1 = time.time()
        pred, outstr = segmodel.eval(image_array0)
        #plt.figure(1);plt.imshow(pred);plt.show()
        binary0 = pred.copy()
        time0 = time.time()
        contours, hierarchy = cv2.findContours(binary0, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        max_id = -1
        if len(contours) > 0:
            max_id = get_largest_contours(contours)
            binary0[:, :] = 0
            cv2.fillPoly(binary0, [contours[max_id][:, 0, :]], 1)
        time1 = time.time()
        time2 = time.time()
        cv2.drawContours(image_array0, contours, max_id, (0, 255, 255), 3)
        time3 = time.time()
        out_url = '%s/%s' % (out_dir, os.path.basename(image_url))
        ret = cv2.imwrite(out_url, image_array0)
        time4 = time.time()
        print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f' % (
            im, os.path.basename(image_url), H, W, get_ms(time0, time_1), outstr,
            get_ms(time1, time0), get_ms(time3, time2), get_ms(time3, time_1)))
def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def file_size(path):
    # Return file/dir size (MB)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / 1E6
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
    else:
        return 0.0
def toONNX(seg_model, onnxFile, inputShape=(1, 3, 360, 640), device=torch.device('cuda:0')):
    print('####begin to export to onnx')
    import onnx
    im = torch.rand(inputShape).to(device)
    seg_model.eval()
    text_for_pred = torch.LongTensor(1, 90).fill_(0).to(device)
    # probe whether the model takes an extra text input (e.g. a recognition
    # head); fall back to the single image input otherwise
    try:
        out = seg_model(im, text_for_pred)
        input2 = (im, text_for_pred)
    except Exception as e:
        out = seg_model(im)
        input2 = (im,)
    print('###test model infer example####')
    train = False
    dynamic = False
    opset = 11
    torch.onnx.export(seg_model, input2, onnxFile, opset_version=opset,
                      training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
                      do_constant_folding=not train,
                      input_names=['images'],
                      output_names=['output'],
                      dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # shape(1,3,640,640)
                                    'output': {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
                                    } if dynamic else None)
    #torch.onnx.export(model, (dummy_input, dummy_text), "vitstr.onnx", verbose=True)
    print('output onnx file:', onnxFile)
def ONNXtoTrt(onnxFile, trtFile, half=True):
    import tensorrt as trt
    #onnx = Path('../weights/BiSeNet/checkpoint.onnx')
    #onnxFile = Path('../weights/STDC/model_maxmIOU75_1720_0.946_360640.onnx')
    time0 = time.time()
    #half=True
    verbose = True; workspace = 4; prefix = colorstr('TensorRT:')
    #f = onnx.with_suffix('.engine')  # TensorRT engine file
    f = trtFile
    logger = trt.Logger(trt.Logger.INFO)
    if verbose:
        logger.min_severity = trt.Logger.Severity.VERBOSE
    builder = trt.Builder(logger)
    config = builder.create_builder_config()
    config.max_workspace_size = workspace * 1 << 30
    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(flag)
    parser = trt.OnnxParser(network, logger)
    if not parser.parse_from_file(str(onnxFile)):
        raise RuntimeError(f'failed to load ONNX file: {onnxFile}')
    inputs = [network.get_input(i) for i in range(network.num_inputs)]
    outputs = [network.get_output(i) for i in range(network.num_outputs)]
    print(f'{prefix} Network Description:')
    for inp in inputs:
        print(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
    for out in outputs:
        print(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
    half &= builder.platform_has_fast_fp16
    print(f'{prefix} building FP{16 if half else 32} engine in {f}')
    if half:
        config.set_flag(trt.BuilderFlag.FP16)
    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
        t.write(engine.serialize())
    print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    time1 = time.time()
    print('output trt file from ONNX, time:%.4f s, file: %s, half: %s' % (time1 - time0, trtFile, half))
def ONNX_eval():
    import onnx
    import numpy as np
    import onnxruntime as ort
    import cv2
    #model_path = '../weights/BiSeNet/checkpoint.onnx';modelSize=(512,512);mean=(0.335, 0.358, 0.332);std=(0.141, 0.138, 0.143)
    model_path = '../weights/STDC/model_maxmIOU75_1720_0.946_360640.onnx'; modelSize = (640, 360); mean = (0.485, 0.456, 0.406); std = (0.229, 0.224, 0.225)
    # validate the model
    onnx_model = onnx.load(model_path)
    onnx.checker.check_model(onnx_model)
    # read the image and reshape it to the input dimensions
    img = cv2.imread("../../river_demo/images/slope/菜地_20220713_青年河8_4335_1578.jpg")
    H, W, C = img.shape
    img = cv2.resize(img, modelSize).transpose(2, 0, 1)
    img = np.array(img)[np.newaxis, :, :, :].astype(np.float32)
    # set up the inference session and the input info
    sess = ort.InferenceSession(model_path, providers=ort.get_available_providers())
    print('len():', len(sess.get_inputs()))
    input_name1 = sess.get_inputs()[0].name
    #input_name2 = sess.get_inputs()[1].name
    #input_name3 = sess.get_inputs()[2].name
    #output = sess.run(None, {input_name1: img, input_name2: img, input_name3: img})
    output = sess.run(None, {input_name1: img})
    pred = np.argmax(output[0], axis=1)[0]  # argmax over the class channel
    pred = cv2.resize(pred.astype(np.uint8), (W, H))
    #plt.imshow(pred);plt.show()
    print('type:', type(output), output[0].shape, output[0].dtype)
    #weights = Path('../weights/BiSeNet/checkpoint.engine')
    half = False; device = 'cuda:0'
    image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG'
    #image_urls=glob.glob('../../river_demo/images/slope/*')
    image_urls = glob.glob('../../../../data/无人机起飞测试图像/*')
    #out_dir ='../../river_demo/images/results/'
    out_dir = 'results'
    os.makedirs(out_dir, exist_ok=True)
    for im, image_url in enumerate(image_urls[0:]):
        image_array0 = cv2.imread(image_url)
        #img=segPreProcess_image(image_array0).to(device)
        img = segPreProcess_image(image_array0, modelSize=modelSize, mean=mean, std=std, numpy=True)
        #img = cv2.resize(img,(512,512)).transpose(2,0,1)
        img = np.array(img)[np.newaxis, :, :, :].astype(np.float32)
        H, W, C = image_array0.shape
        time_1 = time.time()
        #pred,outstr = segmodel.eval(image_array0)
        output = sess.run(None, {input_name1: img})
        pred = output[0]
        #pred = model(img, augment=False, visualize=False)
        #pred = pred.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)[0]  # argmax over the class channel
        pred = cv2.resize(pred.astype(np.uint8), (W, H))
        outstr = '###---###'
        binary0 = pred.copy()
        time0 = time.time()
        contours, hierarchy = cv2.findContours(binary0, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        max_id = -1
        if len(contours) > 0:
            max_id = get_largest_contours(contours)
            binary0[:, :] = 0
            cv2.fillPoly(binary0, [contours[max_id][:, 0, :]], 1)
        time1 = time.time()
        time2 = time.time()
        cv2.drawContours(image_array0, contours, max_id, (0, 255, 255), 3)
        time3 = time.time()
        out_url = '%s/%s' % (out_dir, os.path.basename(image_url))
        ret = cv2.imwrite(out_url, image_array0)
        time4 = time.time()
        print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f' % (
            im, os.path.basename(image_url), H, W, get_ms(time0, time_1), outstr,
            get_ms(time1, time0), get_ms(time3, time2), get_ms(time3, time_1)))
        print('outimage:', out_url)
def EngineInfer_onePic_thread(pars_thread):
    engine, image_array0, out_dir, image_url, im = pars_thread[0:6]
    H, W, C = image_array0.shape
    time0 = time.time()
    time1 = time.time()
    # run the model
    pred, segInfoStr = segtrtEval(engine, image_array0, par={'modelSize': (640, 360), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'numpy': False, 'RGB_convert_first': True})
    pred = 1 - pred
    time2 = time.time()
    outstr = '###---###'
    binary0 = pred.copy()
    time3 = time.time()
    contours, hierarchy = cv2.findContours(binary0, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    max_id = -1
    #if len(contours)>0:
    #    max_id = get_largest_contours(contours)
    #    binary0[:,:] = 0
    #    cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1)
    time4 = time.time()
    cv2.drawContours(image_array0, contours, max_id, (0, 255, 255), 3)
    time5 = time.time()
    out_url = '%s/%s' % (out_dir, os.path.basename(image_url))
    ret = cv2.imwrite(out_url, image_array0)
    time6 = time.time()
    print('image:%d,%s ,%d*%d, %s, findcontours:%.1f ms,draw:%.1f total:%.1f' % (
        im, os.path.basename(image_url), H, W, segInfoStr,
        get_ms(time4, time3), get_ms(time5, time4), get_ms(time5, time0)))
    return 'success'
def trt_version():
    return trt.__version__
def torch_device_from_trt(device):
    if device == trt.TensorLocation.DEVICE:
        return torch.device("cuda")
    elif device == trt.TensorLocation.HOST:
        return torch.device("cpu")
    else:
        raise TypeError("%s is not supported by torch" % device)
def torch_dtype_from_trt(dtype):
    if dtype == trt.int8:
        return torch.int8
    elif trt_version() >= '7.0' and dtype == trt.bool:
        return torch.bool
    elif dtype == trt.int32:
        return torch.int32
    elif dtype == trt.float16:
        return torch.float16
    elif dtype == trt.float32:
        return torch.float32
    else:
        raise TypeError("%s is not supported by torch" % dtype)
def TrtForward(engine, inputs, contextFlag=False):
    t0 = time.time()
    #with engine.create_execution_context() as context:
    if not contextFlag:
        context = engine.create_execution_context()
    else:
        context = contextFlag
    input_names = ['images']; output_names = ['output']
    batch_size = inputs[0].shape[0]
    bindings = [None] * (len(input_names) + len(output_names))
    t1 = time.time()
    # create the output tensors and allocate their memory
    outputs = [None] * len(output_names)
    for i, output_name in enumerate(output_names):
        idx = engine.get_binding_index(output_name)  # look up the binding id by name
        dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))  # matching torch dtype
        shape = (batch_size,) + tuple(engine.get_binding_shape(idx))  # note: with an explicit-batch engine, get_binding_shape already includes the batch dimension
        device = torch_device_from_trt(engine.get_location(idx))
        output = torch.empty(size=shape, dtype=dtype, device=device)
        #print('&'*10,'device:',device,'idx:',idx,'shape:',shape,'dtype:',dtype,' device:',output.get_device())
        outputs[i] = output
        #print('###line65:',output_name,i,idx,dtype,shape)
        bindings[idx] = output.data_ptr()  # bind the output data pointer
    t2 = time.time()
    for i, input_name in enumerate(input_names):
        idx = engine.get_binding_index(input_name)
        bindings[idx] = inputs[0].contiguous().data_ptr()  # should be inputs[i] for multiple inputs; since a single image is used here, every input is bound to the same tensor
        #print('#'*10,'input_names:,', input_name,'idx:',idx, inputs[0].dtype,', inputs[0] device:',inputs[0].get_device())
    t3 = time.time()
    context.execute_v2(bindings)  # run inference
    t4 = time.time()
    if len(outputs) == 1:
        outputs = outputs[0]
    outstr = 'create Context:%.2f alloc memory:%.2f prepare input:%.2f context infer:%.2f, total:%.2f' % (
        (t1 - t0) * 1000, (t2 - t1) * 1000, (t3 - t2) * 1000, (t4 - t3) * 1000, (t4 - t0) * 1000)
    return outputs[0], outstr
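
# A minimal usage sketch for TrtForward (not part of the original pipeline):
# it assumes a deserialized ICudaEngine `engine` built by ONNXtoTrt above and a
# BGR frame, and reuses the ImageNet mean/std used elsewhere in this script.
def _trtforward_example(engine, frame_bgr):
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    img = cv2.resize(frame_bgr, (640, 360))[:, :, ::-1]  # BGR -> RGB
    img = (img.astype(np.float32) / 255.0 - mean) / std  # normalize per channel
    ten = torch.from_numpy(img.transpose(2, 0, 1))[None].cuda()  # NCHW on GPU
    out, stats = TrtForward(engine, [ten])
    prob = out.squeeze()  # -> (nclass, H, W), regardless of extra batch dims
    pred = prob.argmax(0).cpu().numpy().astype(np.uint8)  # per-pixel class map
    return pred, stats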
def EngineInfer(par):
    modelSize = par['modelSize']; mean = par['mean']; std = par['std']; RGB_convert_first = par['RGB_convert_first']; device = par['device']
    weights = par['weights']; image_dir = par['image_dir']
    max_threads = par['max_threads']
    image_urls = glob.glob('%s/*' % (image_dir))
    out_dir = par['out_dir']
    os.makedirs(out_dir, exist_ok=True)
    #trt_model = SegModel_STDC_trt(weights=weights,modelsize=modelSize,std=std,mean=mean,device=device)
    logger = trt.Logger(trt.Logger.ERROR)
    with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())  # read the local trt file and return an ICudaEngine object
    print('#####load TRT file:', weights, 'success #####')
    pars_thread = []
    pars_threads = []
    for im, image_url in enumerate(image_urls[0:]):
        image_array0 = cv2.imread(image_url)
        pars_thread = [engine, image_array0, out_dir, image_url, im]
        pars_threads.append(pars_thread)
        #EngineInfer_onePic_thread(pars_thread)
    t1 = time.time()
    if max_threads == 1:
        for i in range(len(pars_threads[0:])):
            EngineInfer_onePic_thread(pars_threads[i])
    else:
        with ThreadPoolExecutor(max_workers=max_threads) as t:
            for result in t.map(EngineInfer_onePic_thread, pars_threads):
                tt = result
    t2 = time.time()
    print('All %d images time:%.1f ms, each:%.1f ms , with %d threads' % (
        len(image_urls), (t2 - t1) * 1000, (t2 - t1) * 1000.0 / len(image_urls), max_threads))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='stdc_360X640.pth', help='model path(s)')
    opt = parser.parse_args()
    print(opt.weights)
    #pthFile = Path('../../../yolov5TRT/weights/river/stdc_360X640.pth')
    pthFile = Path(opt.weights)
    onnxFile = pthFile.with_suffix('.onnx')
    trtFile = onnxFile.with_suffix('.engine')
    nclass = 2; device = torch.device('cuda:0')
    '''###BiSeNet
    weights = '../weights/BiSeNet/checkpoint.pth'; inputShape = (1, 3, 512, 512)
    segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights)
    seg_model = segmodel.model
    '''
    ##STDC net
    weights = pthFile
    segmodel = SegModel_STDC(nclass=nclass, weights=weights); inputShape = (1, 3, 360, 640)  # (bs,channels,height,width)
    seg_model = segmodel.model
    par = {'modelSize': (inputShape[3], inputShape[2]), 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'RGB_convert_first': True,
           'weights': trtFile, 'device': device, 'max_threads': 1,
           'image_dir': '../../river_demo/images/road', 'out_dir': 'results'}
    #infer_usage()
    toONNX(seg_model, onnxFile, inputShape=inputShape, device=device)
    ONNXtoTrt(onnxFile, trtFile)
    #EngineInfer(par)
    #ONNX_eval()
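
# Example invocation (script name assumed for illustration):
#   python pth2trt.py --weights stdc_360X640.pth
# which writes stdc_360X640.onnx and stdc_360X640.engine next to the checkpoint.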