import argparse

import torch.backends.cudnn as cudnn

from utils import google_utils
from utils.datasets import *
from utils.utils import *


def detect(save_img=False):
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
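    # source handling: '0' opens the local webcam; rtsp/http URLs are read as
    # streams, and a .txt source is treated as a list of stream URLs, one per line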

    # Initialize
    device = torch_utils.select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    half = device.type != 'cpu'  # half precision only supported on CUDA
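    # note: FP16 roughly halves model memory and can speed up inference on GPUs
    # with native half-precision support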

    # Load model
    google_utils.attempt_download(weights)
    model = torch.load(weights, map_location=device)['model'].float()  # load to FP32
    # torch.save(torch.load(weights, map_location=device), weights)  # update model if SourceChangeWarning
    # model.fuse()
    model.to(device).eval()
    imgsz = check_img_size(imgsz, s=model.model[-1].stride.max())  # check img_size
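    # check_img_size rounds imgsz up to a multiple of the model's maximum
    # stride so the input divides evenly through every downsampling layer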
    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = False
    if classify:
        modelc = torch_utils.load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
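        # LoadStreams reads each source on a background thread and yields the
        # latest frame from every stream as one batch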
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
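    # the dummy forward pass above warms up the GPU (kernel selection, memory
    # allocation) so the first real frame's timing is not inflated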
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
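        # a single CHW image gains a leading batch dimension here, since the
        # model expects NCHW input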

        # Inference
        t1 = torch_utils.time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
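        # after NMS, pred is a list with one tensor per image, each of shape
        # (n, 6): x1, y1, x2, y2, confidence, class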
        t2 = torch_utils.time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
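            # gn holds [w, h, w, h] of the original image, used below to
            # normalize xywh box coordinates to [0, 1]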
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in det:
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * 5 + '\n') % (cls, *xywh))  # label format
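                            # one line per box: class x_center y_center width height,
                            # all normalized (the standard YOLO label format)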

                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
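                        # the writer inherits FPS and frame size from the source
                        # video; the fourcc code must name a codec OpenCV can
                        # encode for the output container (mp4v generally works for .mp4)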
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin':  # MacOS
            os.system('open ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='model.pt path')
    parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--output', type=str, default='inference/output', help='output folder')  # output folder
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    opt = parser.parse_args()
    print(opt)

    with torch.no_grad():
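        # no_grad() turns off gradient tracking for inference, cutting memory
        # use and per-op overhead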
        detect()

        # # Update all models
        # for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov3-spp.pt']:
        #    detect()
        #    create_pretrained(opt.weights, opt.weights)
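
# Example invocations (the paths below are illustrative, not part of the script;
# missing weights are fetched by google_utils.attempt_download on first run):
#   python detect.py --source inference/images --weights weights/yolov5s.pt
#   python detect.py --source 0 --view-img            # local webcam
#   python detect.py --source streams.txt --view-img  # .txt list of stream URLs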