import argparse
import os
import sys
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from collections import OrderedDict, namedtuple
import numpy as np
import time
import tensorrt as trt
#import pycuda.driver as cuda

def trt_version():
    return trt.__version__

def torch_device_from_trt(device):
    if device == trt.TensorLocation.DEVICE:
        return torch.device("cuda")
    elif device == trt.TensorLocation.HOST:
        return torch.device("cpu")
    else:
        raise TypeError("%s is not supported by torch" % device)

def torch_dtype_from_trt(dtype):
    if dtype == trt.int8:
        return torch.int8
    elif trt_version() >= '7.0' and dtype == trt.bool:
        return torch.bool
    elif dtype == trt.int32:
        return torch.int32
    elif dtype == trt.float16:
        return torch.float16
    elif dtype == trt.float32:
        return torch.float32
    else:
        raise TypeError("%s is not supported by torch" % dtype)

class TRTModule(torch.nn.Module):
    def __init__(self, engine=None, input_names=None, output_names=None):
        super(TRTModule, self).__init__()
        self.engine = engine
        #if self.engine is not None:
        #    # create an execution context from the engine
        #    self.context = self.engine.create_execution_context()
        self.input_names = input_names
        self.output_names = output_names
    def forward(self, *inputs):
        with self.engine.create_execution_context() as context:
            batch_size = inputs[0].shape[0]
            bindings = [None] * (len(self.input_names) + len(self.output_names))
            # create the output tensors and allocate their memory
            outputs = [None] * len(self.output_names)
            for i, output_name in enumerate(self.output_names):
                idx = self.engine.get_binding_index(output_name)  # look up the binding index by name
                dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))  # matching torch dtype
                shape = (batch_size,) + tuple(self.engine.get_binding_shape(idx))  # matching shape
                device = torch_device_from_trt(self.engine.get_location(idx))
                output = torch.empty(size=shape, dtype=dtype, device=device)
                outputs[i] = output
                bindings[idx] = output.data_ptr()  # bind the output data pointer
            for i, input_name in enumerate(self.input_names):
                idx = self.engine.get_binding_index(input_name)
                # Should be inputs[i] for multiple inputs; since a single image is used
                # here, every input binding is pointed at the same image.
                bindings[idx] = inputs[0].contiguous().data_ptr()
            #context.execute_async(batch_size, bindings, torch.cuda.current_stream().cuda_stream)  # run inference (async)
            #context.execute_async_v2(bindings=bindings, stream_handle=torch.cuda.current_stream().cuda_stream)  # run inference (async v2)
            context.execute_v2(bindings)  # run inference
            if len(outputs) == 1:
                outputs = outputs[0]
            return outputs[0]  # drop the prepended batch dimension
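
# Usage sketch (assumptions: a serialized engine file such as 'seg_model.trt' exists on
# disk, and the binding names 'images'/'output' match the engine; both the path and the
# names are illustrative, echoing those used in segTrtForward below):
#   logger = trt.Logger(trt.Logger.WARNING)
#   with open('seg_model.trt', 'rb') as f, trt.Runtime(logger) as runtime:
#       engine = runtime.deserialize_cuda_engine(f.read())
#   trt_net = TRTModule(engine, input_names=['images'], output_names=['output'])
#   dummy = torch.zeros(1, 3, 360, 640, device='cuda:0')
#   pred = trt_net(dummy)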
def get_ms(t1, t0):
    return (t1 - t0) * 1000.0

def segPreProcess_image(image, modelSize=(640, 360), mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143), numpy=False, RGB_convert_first=False):
    if RGB_convert_first:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, modelSize)
    image = image.astype(np.float32)
    image /= 255.0
    image[:, :, 0] -= mean[0]
    image[:, :, 1] -= mean[1]
    image[:, :, 2] -= mean[2]
    image[:, :, 0] /= std[0]
    image[:, :, 1] /= std[1]
    image[:, :, 2] /= std[2]
    if not RGB_convert_first:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
    if numpy:
        return image
    else:
        image = torch.from_numpy(image).float()
        image = image.unsqueeze(0)  # add the batch dimension
        return image
def yolov5Trtforward(model, im):
    namess = [model.get_binding_name(index) for index in range(model.num_bindings)]
    input_names = [namess[0]]
    output_names = namess[1:]
    with model.create_execution_context() as context:
        batch_size = im.shape[0]
        bindings = [None] * (len(input_names) + len(output_names))
        # create the output tensors and allocate their memory
        outputs = [None] * len(output_names)
        for i, output_name in enumerate(output_names):
            idx = model.get_binding_index(output_name)  # look up the binding index by name
            dtype = torch_dtype_from_trt(model.get_binding_dtype(idx))  # matching torch dtype
            shape = tuple(model.get_binding_shape(idx))  # matching shape
            device = torch_device_from_trt(model.get_location(idx))
            output = torch.empty(size=shape, dtype=dtype, device=device)
            outputs[i] = output
            #print('###', idx, dtype, shape, output.size())
            bindings[idx] = output.data_ptr()  # bind the output data pointer
        for i, input_name in enumerate(input_names):
            idx = model.get_binding_index(input_name)
            bindings[idx] = im.contiguous().data_ptr()
        context.execute_v2(bindings)
        return outputs[3]
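
# Note: yolov5Trtforward assumes binding 0 is the image input and that the engine
# exposes at least four outputs, returning the fourth (outputs[3]). Minimal call
# sketch, assuming 'yolov5.trt' is a hypothetical engine built for a 1x3x640x640 input:
#   with open('yolov5.trt', 'rb') as f, trt.Runtime(trt.Logger()) as runtime:
#       det_engine = runtime.deserialize_cuda_engine(f.read())
#   im = torch.zeros(1, 3, 640, 640, device='cuda:0')
#   det = yolov5Trtforward(det_engine, im)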
def segTrtForward(engine, inputs, contextFlag=False):
    if not contextFlag:
        context = engine.create_execution_context()
    else:
        context = contextFlag
    input_names = ['images']
    output_names = ['output']
    batch_size = inputs[0].shape[0]
    bindings = [None] * (len(input_names) + len(output_names))
    # create the output tensors and allocate their memory
    outputs = [None] * len(output_names)
    for i, output_name in enumerate(output_names):
        idx = engine.get_binding_index(output_name)  # look up the binding index by name
        dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))  # matching torch dtype
        shape = (batch_size,) + tuple(engine.get_binding_shape(idx))  # matching shape
        device = torch_device_from_trt(engine.get_location(idx))
        output = torch.empty(size=shape, dtype=dtype, device=device)
        outputs[i] = output
        bindings[idx] = output.data_ptr()  # bind the output data pointer
    for i, input_name in enumerate(input_names):
        idx = engine.get_binding_index(input_name)
        # Should be inputs[i] for multiple inputs; since a single image is used here,
        # every input binding is pointed at the same image.
        bindings[idx] = inputs[0].contiguous().data_ptr()
    context.execute_v2(bindings)  # run inference
    if len(outputs) == 1:
        outputs = outputs[0]
    return outputs[0]
def OcrTrtForward(engine, inputs, contextFlag=False):
    t0 = time.time()
    if not contextFlag:
        context = engine.create_execution_context()
    else:
        context = contextFlag
    input_names = ['images']
    output_names = ['output']
    batch_size = inputs[0].shape[0]
    bindings = [None] * (len(input_names) + len(output_names))
    t1 = time.time()
    # create the output tensors and allocate their memory
    outputs = [None] * len(output_names)
    for i, output_name in enumerate(output_names):
        idx = engine.get_binding_index(output_name)  # look up the binding index by name
        dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))  # matching torch dtype
        shape = (batch_size,) + tuple(engine.get_binding_shape(idx))  # matching shape
        device = torch_device_from_trt(engine.get_location(idx))
        output = torch.empty(size=shape, dtype=dtype, device=device)
        outputs[i] = output
        bindings[idx] = output.data_ptr()  # bind the output data pointer
    t2 = time.time()
    for i, input_name in enumerate(input_names):
        idx = engine.get_binding_index(input_name)
        # Should be inputs[i] for multiple inputs; since a single image is used here,
        # every input binding is pointed at the same image.
        bindings[idx] = inputs[0].contiguous().data_ptr()
    t3 = time.time()
    context.execute_v2(bindings)  # run inference
    t4 = time.time()
    if len(outputs) == 1:
        outputs = outputs[0]
    outstr = 'create context:%.2f alloc memory:%.2f prepare input:%.2f context infer:%.2f, total:%.2f' % (
        (t1 - t0) * 1000, (t2 - t1) * 1000, (t3 - t2) * 1000, (t4 - t3) * 1000, (t4 - t0) * 1000)
    return outputs[0], outstr
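
# Creating an execution context per call is measurable overhead (the 'create context'
# field of outstr). A long-lived context can be passed via contextFlag to amortize it;
# 'ocr_engine' and 'batches' below are hypothetical:
#   ctx = ocr_engine.create_execution_context()
#   for batch in batches:
#       out, outstr = OcrTrtForward(ocr_engine, [batch], contextFlag=ctx)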
def segtrtEval(engine, image_array0, par={'modelSize': (640, 360), 'nclass': 2, 'predResize': True, 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'numpy': False, 'RGB_convert_first': True}):
    time0_0 = time.time()
    H, W, C = image_array0.shape
    img_input = segPreProcess_image(image_array0, modelSize=par['modelSize'], mean=par['mean'], std=par['std'], numpy=par['numpy'], RGB_convert_first=par['RGB_convert_first'])
    img_input = img_input.to('cuda:0')
    time1_0 = time.time()
    pred = segTrtForward(engine, [img_input])
    time2_0 = time.time()
    pred = torch.argmax(pred, dim=1).cpu().numpy()[0]
    time3_0 = time.time()
    # Resize the mask back to the original image size; resizing is the default
    # behaviour and is skipped only when par['predResize'] is present and False.
    if 'predResize' in par.keys():
        if par['predResize']:
            pred = cv2.resize(pred.astype(np.uint8), (W, H))
    else:
        pred = cv2.resize(pred.astype(np.uint8), (W, H))
    time4_0 = time.time()
    segInfoStr = 'pre-process:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f \n ' % (
        get_ms(time1_0, time0_0), get_ms(time2_0, time1_0), get_ms(time3_0, time2_0), get_ms(time4_0, time3_0), get_ms(time4_0, time0_0))
    return pred, segInfoStr
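
# End-to-end sketch (assumptions: 'seg_model.trt' and 'test.jpg' are placeholder paths;
# the engine's input/output bindings are named 'images'/'output' as segTrtForward expects):
#   if __name__ == '__main__':
#       logger = trt.Logger(trt.Logger.WARNING)
#       with open('seg_model.trt', 'rb') as f, trt.Runtime(logger) as runtime:
#           seg_engine = runtime.deserialize_cuda_engine(f.read())
#       frame = cv2.imread('test.jpg')
#       mask, info = segtrtEval(seg_engine, frame)
#       print(info)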