TensorRT Conversion Code

  1. """
  2. An example that uses TensorRT's Python api to make inferences.
  3. """
  4. import ctypes
  5. import os
  6. import shutil
  7. import random
  8. import sys
  9. import threading
  10. import time
  11. import cv2
  12. import numpy as np
  13. import pycuda.autoinit
  14. import pycuda.driver as cuda
  15. import tensorrt as trt
  16. CONF_THRESH = 0.5
  17. IOU_THRESHOLD = 0.4
def get_img_path_batches(batch_size, img_dir):
    ret = []
    batch = []
    for root, dirs, files in os.walk(img_dir):
        for name in files:
            if len(batch) == batch_size:
                ret.append(batch)
                batch = []
            batch.append(os.path.join(root, name))
    if len(batch) > 0:
        ret.append(batch)
    return ret

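# For illustration: with batch_size=2 and a directory containing a.jpg, b.jpg
# and c.jpg, get_img_path_batches returns [['<dir>/a.jpg', '<dir>/b.jpg'],
# ['<dir>/c.jpg']] -- note the trailing batch may be shorter than batch_size.

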
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """
    description: Plots one bounding box on image img;
                 this function comes from the YOLOv5 project.
    param:
        x: a box like [x1,y1,x2,y2]
        img: an OpenCV image object
        color: color to draw the rectangle, such as (0,255,0)
        label: str
        line_thickness: int
    return:
        no return
    """
    tl = (
        line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    )  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(
            img,
            label,
            (c1[0], c1[1] - 2),
            0,
            tl / 3,
            [225, 255, 255],
            thickness=tf,
            lineType=cv2.LINE_AA,
        )


class YoLov5TRT(object):
    """
    description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
    """

    def __init__(self, engine_file_path):
        # Create a CUDA context on this device.
        self.ctx = cuda.Device(0).make_context()
        stream = cuda.Stream()
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        runtime = trt.Runtime(TRT_LOGGER)
        # Deserialize the engine from file
        with open(engine_file_path, "rb") as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        context = engine.create_execution_context()

        host_inputs = []
        cuda_inputs = []
        host_outputs = []
        cuda_outputs = []
        bindings = []

        for binding in engine:
            print('binding:', binding, engine.get_binding_shape(binding))
            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
            dtype = trt.nptype(engine.get_binding_dtype(binding))
            # Allocate host and device buffers
            host_mem = cuda.pagelocked_empty(size, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            # Append the device buffer to device bindings.
            bindings.append(int(cuda_mem))
            # Append to the appropriate list.
            if engine.binding_is_input(binding):
                self.input_w = engine.get_binding_shape(binding)[-1]
                self.input_h = engine.get_binding_shape(binding)[-2]
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)

        # Store
        self.stream = stream
        self.context = context
        self.engine = engine
        self.host_inputs = host_inputs
        self.cuda_inputs = cuda_inputs
        self.host_outputs = host_outputs
        self.cuda_outputs = cuda_outputs
        self.bindings = bindings
        self.batch_size = engine.max_batch_size
        # Data length
        self.det_output_length = host_outputs[0].shape[0]
        self.mask_output_length = host_outputs[1].shape[0]
        self.seg_w = int(self.input_w / 4)
        self.seg_h = int(self.input_h / 4)
        self.seg_c = int(self.mask_output_length / (self.seg_h * self.seg_w))
        self.det_row_output_length = self.seg_c + 6
        # Draw mask
        self.colors_obj = Colors()

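    # A worked example of the sizes derived above, assuming a typical
    # yolov5s-seg engine with a 3x640x640 input and 32 mask prototypes:
    # seg_w = seg_h = 640 / 4 = 160, the prototype output then holds
    # 32 * 160 * 160 = 819200 floats so seg_c comes out as 32, and
    # det_row_output_length = 32 + 6 = 38, i.e. one detection row is
    # [cx, cy, w, h, conf, cls_id] plus 32 mask coefficients.
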
    def infer(self, raw_image_generator):
        # Make self the active context, pushing it on top of the context stack.
        self.ctx.push()
        # Restore
        stream = self.stream
        context = self.context
        engine = self.engine
        host_inputs = self.host_inputs
        cuda_inputs = self.cuda_inputs
        host_outputs = self.host_outputs
        cuda_outputs = self.cuda_outputs
        bindings = self.bindings
        # Do image preprocess
        batch_image_raw = []
        batch_origin_h = []
        batch_origin_w = []
        batch_input_image = np.empty(shape=[self.batch_size, 3, self.input_h, self.input_w])
        for i, image_raw in enumerate(raw_image_generator):
            input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)
            batch_image_raw.append(image_raw)
            batch_origin_h.append(origin_h)
            batch_origin_w.append(origin_w)
            np.copyto(batch_input_image[i], input_image)
        batch_input_image = np.ascontiguousarray(batch_input_image)
        # Copy input image to host buffer
        np.copyto(host_inputs[0], batch_input_image.ravel())
        start = time.time()
        # Transfer input data to the GPU.
        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
        # Run inference.
        context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
        # Transfer predictions back from the GPU.
        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
        cuda.memcpy_dtoh_async(host_outputs[1], cuda_outputs[1], stream)
        # Synchronize the stream
        stream.synchronize()
        end = time.time()
        # Remove any context from the top of the context stack, deactivating it.
        self.ctx.pop()
        # Detection output and prototype mask output (this sample assumes batch_size = 1 for the masks)
        output_bbox = host_outputs[0]
        output_proto_mask = host_outputs[1]
        # Do postprocess
        for i in range(self.batch_size):
            result_boxes, result_scores, result_classid, result_proto_coef = self.post_process(
                output_bbox[i * self.det_output_length: (i + 1) * self.det_output_length],
                batch_origin_h[i], batch_origin_w[i]
            )
            if result_proto_coef.shape[0] == 0:
                continue
            result_masks = self.process_mask(output_proto_mask, result_proto_coef,
                                             result_boxes, batch_origin_h[i], batch_origin_w[i])
            # Draw masks on the original image
            self.draw_mask(result_masks,
                           colors_=[self.colors_obj(x, True) for x in result_classid],
                           im_src=batch_image_raw[i])
            # Draw rectangles and labels on the original image
            for j in range(len(result_boxes)):
                box = result_boxes[j]
                plot_one_box(
                    box,
                    batch_image_raw[i],
                    label="{}:{:.2f}".format(
                        categories[int(result_classid[j])], result_scores[j]
                    ),
                )
        return batch_image_raw, end - start

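    # Note on the ctx.push()/ctx.pop() pair in infer(): this method runs on
    # worker threads (inferThread / warmUpThread below), and PyCUDA binds a
    # context per thread, so the context created in __init__ has to be pushed
    # onto the calling thread's stack before any memcpy/execute call and
    # popped afterwards; without it those CUDA calls would have no active
    # context on that thread.
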
    def destroy(self):
        # Remove any context from the top of the context stack, deactivating it.
        self.ctx.pop()

    def get_raw_image(self, image_path_batch):
        """
        description: Read images from a batch of image paths
        """
        for img_path in image_path_batch:
            yield cv2.imread(img_path)

    def get_raw_image_zeros(self, image_path_batch=None):
        """
        description: Prepare zero-filled images for warmup
        """
        for _ in range(self.batch_size):
            yield np.zeros([self.input_h, self.input_w, 3], dtype=np.uint8)

    def preprocess_image(self, raw_bgr_image):
        """
        description: Convert BGR image to RGB,
                     resize and pad it to target size, normalize to [0,1],
                     transform to NCHW format.
        param:
            raw_bgr_image: numpy array, the raw BGR image
        return:
            image: the processed image
            image_raw: the original image
            h: original height
            w: original width
        """
        image_raw = raw_bgr_image
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Calculate width and height and paddings
        r_w = self.input_w / w
        r_h = self.input_h / h
        if r_h > r_w:
            tw = self.input_w
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.input_h - th) / 2)
            ty2 = self.input_h - th - ty1
        else:
            tw = int(r_h * w)
            th = self.input_h
            tx1 = int((self.input_w - tw) / 2)
            tx2 = self.input_w - tw - tx1
            ty1 = ty2 = 0
        # Resize the image on the long side while maintaining the aspect ratio
        image = cv2.resize(image, (tw, th))
        # Pad the short side with (128,128,128)
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
        )
        image = image.astype(np.float32)
        # Normalize to [0,1]
        image /= 255.0
        # HWC to CHW format:
        image = np.transpose(image, [2, 0, 1])
        # CHW to NCHW format
        image = np.expand_dims(image, axis=0)
        # Convert the image to row-major order, also known as "C order":
        image = np.ascontiguousarray(image)
        return image, image_raw, h, w

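    # Letterbox arithmetic, worked through for a hypothetical 1280x720 frame
    # with a 640x640 engine input: r_w = 640/1280 = 0.5, r_h = 640/720 ~ 0.89,
    # so r_h > r_w, the image is resized to 640x360, and ty1 = ty2 = 140 gray
    # rows are padded on top and bottom to reach 640x640.
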
    def xywh2xyxy(self, origin_h, origin_w, x):
        """
        description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2],
                     where xy1 = top-left and xy2 = bottom-right
        param:
            origin_h: height of original image
            origin_w: width of original image
            x: a NumPy array of boxes, each row is a box [center_x, center_y, w, h]
        return:
            y: a NumPy array of boxes, each row is a box [x1, y1, x2, y2]
        """
        y = np.zeros_like(x)
        r_w = self.input_w / origin_w
        r_h = self.input_h / origin_h
        if r_h > r_w:
            y[:, 0] = x[:, 0] - x[:, 2] / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y /= r_w
        else:
            y[:, 0] = x[:, 0] - x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2
            y /= r_h
        return y

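    # xywh2xyxy also undoes the letterbox: predictions live in the padded
    # input space, so the padding offset is subtracted before dividing by the
    # resize ratio. Continuing the 1280x720 example above, a predicted y
    # coordinate is shifted by -(640 - 0.5 * 720) / 2 = -140, then divided by
    # r_w = 0.5 to land in original-image pixels.
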
    def post_process(self, output_boxes, origin_h, origin_w):
        """
        description: postprocess the prediction
        param:
            output_boxes: a flat numpy array like [num_boxes, cx, cy, w, h, conf, cls_id, mask[32], cx, cy, w, h, conf, cls_id, mask[32], ...]
            origin_h: height of original image
            origin_w: width of original image
        return:
            result_boxes: final boxes, a NumPy array, each row is a box [x1, y1, x2, y2]
            result_scores: final scores, a NumPy array, one score per box
            result_classid: final class ids, a NumPy array, one class id per box
            result_proto_coef: mask coefficients, a NumPy array, one row of coefficients per box
        """
        # Get the number of boxes detected
        num = int(output_boxes[0])
        # Reshape to a two-dimensional ndarray
        pred = np.reshape(output_boxes[1:], (-1, self.det_row_output_length))[:num, :]
        # Do nms
        boxes = self.non_max_suppression(pred, origin_h, origin_w,
                                         conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)
        result_boxes = boxes[:, :4] if len(boxes) else np.array([])
        result_scores = boxes[:, 4] if len(boxes) else np.array([])
        result_classid = boxes[:, 5] if len(boxes) else np.array([])
        result_proto_coef = boxes[:, 6:] if len(boxes) else np.array([])
        return result_boxes, result_scores, result_classid, result_proto_coef

    def bbox_iou(self, box1, box2, x1y1x2y2=True):
        """
        description: compute the IoU of two bounding boxes
        param:
            box1: a box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
            box2: a box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
            x1y1x2y2: select the coordinate format
        return:
            iou: computed IoU
        """
        if not x1y1x2y2:
            # Transform from center and width to exact coordinates
            b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
            b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
            b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
            b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
        else:
            # Get the coordinates of bounding boxes
            b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
            b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
        # Get the coordinates of the intersection rectangle
        inter_rect_x1 = np.maximum(b1_x1, b2_x1)
        inter_rect_y1 = np.maximum(b1_y1, b2_y1)
        inter_rect_x2 = np.minimum(b1_x2, b2_x2)
        inter_rect_y2 = np.minimum(b1_y2, b2_y2)
        # Intersection area
        inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \
                     np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
        # Union area
        b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
        b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
        iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
        return iou

    def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nms_thres=0.4):
        """
        description: Removes detections with an object confidence score lower than 'conf_thres'
                     and performs Non-Maximum Suppression to further filter detections.
        param:
            prediction: detections, each row is (center_x, center_y, w, h, conf, cls_id, mask coefficients[32])
            origin_h: original image height
            origin_w: original image width
            conf_thres: a confidence threshold to filter detections
            nms_thres: an IoU threshold to filter detections
        return:
            boxes: output after nms, each row is (x1, y1, x2, y2, conf, cls_id, mask coefficients[32])
        """
        # Get the boxes that score > CONF_THRESH
        boxes = prediction[prediction[:, 4] >= conf_thres]
        # Transform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2]
        boxes[:, :4] = self.xywh2xyxy(origin_h, origin_w, boxes[:, :4])
        # Clip the coordinates
        boxes[:, 0] = np.clip(boxes[:, 0], 0, origin_w - 1)
        boxes[:, 2] = np.clip(boxes[:, 2], 0, origin_w - 1)
        boxes[:, 1] = np.clip(boxes[:, 1], 0, origin_h - 1)
        boxes[:, 3] = np.clip(boxes[:, 3], 0, origin_h - 1)
        # Object confidence
        confs = boxes[:, 4]
        # Sort by the confs
        boxes = boxes[np.argsort(-confs)]
        # Perform non-maximum suppression
        keep_boxes = []
        while boxes.shape[0]:
            large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_thres
            label_match = boxes[0, 5] == boxes[:, 5]
            # Indices of boxes with lower confidence scores, large IoUs and matching labels
            invalid = large_overlap & label_match
            keep_boxes += [boxes[0]]
            boxes = boxes[~invalid]
        boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])
        return boxes

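    # The loop above is greedy, class-aware NMS: boxes are sorted by
    # confidence, the most confident box is kept, and every remaining box of
    # the same class whose IoU with it exceeds nms_thres is dropped; boxes of
    # other classes survive even at high overlap because of the label_match
    # test.
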
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def scale_mask(self, mask, ih, iw):
        """
        description: Upsample a prototype-resolution mask to the input size,
                     crop away the letterbox padding, and resize the result
                     to the original image size (ih, iw).
        """
        mask = cv2.resize(mask, (self.input_w, self.input_h))
        r_w = self.input_w / (iw * 1.0)
        r_h = self.input_h / (ih * 1.0)
        if r_h > r_w:
            w = self.input_w
            h = int(r_w * ih)
            x = 0
            y = int((self.input_h - h) / 2)
        else:
            w = int(r_h * iw)
            h = self.input_h
            x = int((self.input_w - w) / 2)
            y = 0
        crop = mask[y:y + h, x:x + w]
        crop = cv2.resize(crop, (iw, ih))
        return crop

    def process_mask(self, output_proto_mask, result_proto_coef, result_boxes, ih, iw):
        """
        description: Combine prototype masks with per-detection coefficients
                     (YOLOv5 instance segmentation).
        param:
            output_proto_mask: prototype masks, e.g. (32, 160, 160) for 640x640 input
            result_proto_coef: prototype mask coefficients (n, 32), n is the number of detections
            result_boxes: detected boxes, used to crop each mask
            ih: rows of original image
            iw: cols of original image
        return:
            mask_result: (n, ih, iw)
        """
        result_proto_masks = output_proto_mask.reshape(self.seg_c, self.seg_h, self.seg_w)
        c, mh, mw = result_proto_masks.shape
        masks = self.sigmoid(
            result_proto_coef @ result_proto_masks.astype(np.float32).reshape(c, -1)
        ).reshape(-1, mh, mw)
        mask_result = []
        for mask, box in zip(masks, result_boxes):
            mask_s = np.zeros((ih, iw))
            crop_mask = self.scale_mask(mask, ih, iw)
            x1 = int(box[0])
            y1 = int(box[1])
            x2 = int(box[2])
            y2 = int(box[3])
            crop = crop_mask[y1:y2, x1:x2]
            crop = np.where(crop >= 0.5, 1, 0)
            crop = crop.astype(np.uint8)
            mask_s[y1:y2, x1:x2] = crop
            mask_result.append(mask_s)
        mask_result = np.array(mask_result)
        return mask_result

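    # Shape walkthrough for the matrix product above, assuming a hypothetical
    # n = 3 detections and 32 prototypes at 160x160: (3, 32) @ (32, 25600)
    # gives (3, 25600), reshaped to (3, 160, 160). Each detection's mask is a
    # sigmoid-activated linear combination of the prototype masks, which is
    # then rescaled, cropped to its box, and binarized at 0.5.
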
    def draw_mask(self, masks, colors_, im_src, alpha=0.5):
        """
        description: Draw masks on image
        param:
            masks: result masks, shape (n, ih, iw)
            colors_: colors to draw the masks
            im_src: original image
            alpha: blending factor between original image and masks
        return:
            no return
        """
        if len(masks) == 0:
            return
        masks = np.asarray(masks, dtype=np.uint8)
        masks = np.ascontiguousarray(masks.transpose(1, 2, 0))
        masks = np.asarray(masks, dtype=np.float32)
        colors_ = np.asarray(colors_, dtype=np.float32)
        s = masks.sum(2, keepdims=True).clip(0, 1)
        masks = (masks @ colors_).clip(0, 255)
        im_src[:] = masks * alpha + im_src * (1 - s * alpha)


class inferThread(threading.Thread):
    def __init__(self, yolov5_wrapper, image_path_batch):
        threading.Thread.__init__(self)
        self.yolov5_wrapper = yolov5_wrapper
        self.image_path_batch = image_path_batch

    def run(self):
        batch_image_raw, use_time = self.yolov5_wrapper.infer(
            self.yolov5_wrapper.get_raw_image(self.image_path_batch))
        for i, img_path in enumerate(self.image_path_batch):
            parent, filename = os.path.split(img_path)
            save_name = os.path.join('output', filename)
            # Save image
            cv2.imwrite(save_name, batch_image_raw[i])
        print('input->{}, time->{:.2f}ms, saving into output/'.format(
            self.image_path_batch, use_time * 1000))


class warmUpThread(threading.Thread):
    def __init__(self, yolov5_wrapper):
        threading.Thread.__init__(self)
        self.yolov5_wrapper = yolov5_wrapper

    def run(self):
        batch_image_raw, use_time = self.yolov5_wrapper.infer(
            self.yolov5_wrapper.get_raw_image_zeros())
        print('warm_up->{}, time->{:.2f}ms'.format(batch_image_raw[0].shape, use_time * 1000))


class Colors:
    def __init__(self):
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A',
                '92CC17', '3DDB86', '1A9334', '00D4BB', '2C99A8', '00C2FF',
                '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF',
                'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


if __name__ == "__main__":
    # Load custom plugin and engine
    PLUGIN_LIBRARY = "build/libmyplugins.so"
    engine_file_path = "build/yolov5s-seg.engine"

    if len(sys.argv) > 1:
        engine_file_path = sys.argv[1]
    if len(sys.argv) > 2:
        PLUGIN_LIBRARY = sys.argv[2]

    ctypes.CDLL(PLUGIN_LIBRARY)

    # Load COCO labels
    categories = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
                  "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
                  "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
                  "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
                  "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
                  "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
                  "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
                  "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
                  "hair drier", "toothbrush"]

    if os.path.exists('output/'):
        shutil.rmtree('output/')
    os.makedirs('output/')
    # A YoLov5TRT instance
    yolov5_wrapper = YoLov5TRT(engine_file_path)
    try:
        print('batch size is', yolov5_wrapper.batch_size)

        image_dir = "images/"
        image_path_batches = get_img_path_batches(yolov5_wrapper.batch_size, image_dir)

        for i in range(10):
            # Create a new thread to do warm_up
            thread1 = warmUpThread(yolov5_wrapper)
            thread1.start()
            thread1.join()

        for batch in image_path_batches:
            # Create a new thread to do inference
            thread1 = inferThread(yolov5_wrapper, batch)
            thread1.start()
            thread1.join()
    finally:
        # Destroy the instance
        yolov5_wrapper.destroy()
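
# Usage sketch (the file name below is hypothetical -- use whatever this
# script is saved as; the default paths match the build layout assumed above):
#   python yolov5_seg_trt.py [engine_file] [plugin_library]
#   python yolov5_seg_trt.py build/yolov5s-seg.engine build/libmyplugins.so
# The script reads test images from images/, recreates output/ on every run,
# and writes the annotated results there.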