import argparse
import glob
import os
import shutil
from pathlib import Path

import numpy as np
import torch
import yaml
from sotabencheval.object_detection import COCOEvaluator
from sotabencheval.utils import is_server
from tqdm import tqdm

from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import (
    coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, non_max_suppression,
    scale_coords, xyxy2xywh, clip_coords, set_logging)
from utils.torch_utils import select_device, time_synchronized

DATA_ROOT = './.data/vision/coco' if is_server() else '../coco'  # sotabench data dir
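
# Context: this script appears to be the sotabench entry point for YOLOv5. It runs
# COCO inference and streams detections to sotabencheval's COCOEvaluator. test()
# below looks adapted from the repository's test.py (argparse still uses
# prog='test.py'); the inherited per-class mAP / pycocotools code is kept but
# commented out, since COCOEvaluator handles the metric computation here.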

def test(data,
         weights=None,
         batch_size=16,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6,  # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         verbose=False,
         model=None,
         dataloader=None,
         save_dir='',
         merge=False,
         save_txt=False):
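    """Evaluate a YOLOv5 model on a COCO-format dataset.

    Called directly (model=None), it loads `weights` and builds a dataloader from
    the paths in `data`; called from training code, it reuses the passed-in model
    and dataloader. Detections are collected into `jdict` for the COCOEvaluator.
    """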
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)
        merge, save_txt = opt.merge, opt.save_txt  # use Merge NMS, save *.txt labels
        if save_txt:
            out = Path('inference/output')
            if os.path.exists(out):
                shutil.rmtree(out)  # delete output folder
            os.makedirs(out)  # make new output folder

        # Remove previous
        for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
            os.remove(f)

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()
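    # iouv is [0.50, 0.55, ..., 0.95], so niou == 10: a detection is scored
    # correct/incorrect at each threshold independently, and averaging AP over
    # the 10 thresholds yields the COCO mAP@0.5:0.95 metric.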

    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
        path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
        dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt,
                                       hyp=None, augment=False, cache=True, pad=0.5, rect=True)[0]

    seen = 0
    names = model.names if hasattr(model, 'names') else model.module.names
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []

    evaluator = COCOEvaluator(root=DATA_ROOT, model_name=opt.weights.replace('.pt', ''))

    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)
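        # whwh maps normalized xywh targets to pixel coordinates; in this script it
        # is only consumed by the commented-out mAP code further down
        # (tbox = xywh2xyxy(labels[:, 1:5]) * whwh).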

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if training:  # if model has loss hyperparameters
                loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3]  # box, obj, cls

            # Run NMS
            t = time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
                x = pred.clone()
                x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1])  # to original
                for *xyxy, conf, cls in x:
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    with open(str(out / Path(paths[si]).stem) + '.txt', 'a') as f:
                        f.write(('%g ' * 5 + '\n') % (cls, *xywh))  # label format

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = Path(paths[si]).stem
                box = pred[:, :4].clone()  # xyxy
                scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
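                # COCO's JSON format expects [x_min, y_min, width, height], so the
                # (x_center, y_center) produced by xyxy2xywh is shifted to the
                # top-left corner above before serialization.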
                for p, b in zip(pred.tolist(), box.tolist()):
                    result = {'image_id': int(image_id) if image_id.isnumeric() else image_id,
                              'category_id': coco91class[int(p[5])],
                              'bbox': [round(x, 3) for x in b],
                              'score': round(p[4], 5)}
                    jdict.append(result)

                    # evaluator.add([result])
                    # if evaluator.cache_exists:
                    #     break

            # # Assign all predictions as incorrect
            # correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            # if nl:
            #     detected = []  # target indices
            #     tcls_tensor = labels[:, 0]
            #
            #     # target boxes
            #     tbox = xywh2xyxy(labels[:, 1:5]) * whwh
            #
            #     # Per target class
            #     for cls in torch.unique(tcls_tensor):
            #         ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # target indices
            #         pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # prediction indices
            #
            #         # Search for detections
            #         if pi.shape[0]:
            #             # Prediction to target ious
            #             ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices
            #
            #             # Append detections
            #             detected_set = set()
            #             for j in (ious > iouv[0]).nonzero(as_tuple=False):
            #                 d = ti[i[j]]  # detected target
            #                 if d.item() not in detected_set:
            #                     detected_set.add(d.item())
            #                     detected.append(d)
            #                     correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
            #                     if len(detected) == nl:  # all targets already located in image
            #                         break
            #
            # # Append statistics (correct, conf, pcls, tcls)
            # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # # Plot images
        # if batch_i < 1:
        #     f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i)  # filename
        #     plot_images(img, targets, paths, str(f), names)  # ground truth
        #     f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
        #     plot_images(img, output_to_target(output, width, height), paths, str(f), names)  # predictions

    evaluator.add(jdict)
    evaluator.save()
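    # add() registers the accumulated detections with the evaluator and save()
    # finalizes the run; on the sotabench server this is what produces the
    # reported COCO mAP (behavior per the sotabencheval package).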

    # # Compute statistics
    # stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    # if len(stats) and stats[0].any():
    #     p, r, ap, f1, ap_class = ap_per_class(*stats)
    #     p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)  # [P, R, AP@0.5, AP@0.5:0.95]
    #     mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    #     nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    # else:
    #     nt = torch.zeros(1)
    #
    # # Print results
    # pf = '%20s' + '%12.3g' * 6  # print format
    # print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    #
    # # Print results per class
    # if verbose and nc > 1 and len(stats):
    #     for i, c in enumerate(ap_class):
    #         print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
    #
    # # Print speeds
    # t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    # if not training:
    #     print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
    #
    # # Save JSON
    # if save_json and len(jdict):
    #     f = 'detections_val2017_%s_results.json' % \
    #         (weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '')  # filename
    #     print('\nCOCO mAP with pycocotools... saving %s...' % f)
    #     with open(f, 'w') as file:
    #         json.dump(jdict, file)
    #
    #     try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
    #         from pycocotools.coco import COCO
    #         from pycocotools.cocoeval import COCOeval
    #
    #         imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
    #         cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0])  # initialize COCO ground truth api
    #         cocoDt = cocoGt.loadRes(f)  # initialize COCO pred api
    #         cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    #         cocoEval.params.imgIds = imgIds  # image IDs to evaluate
    #         cocoEval.evaluate()
    #         cocoEval.accumulate()
    #         cocoEval.summarize()
    #         map, map50 = cocoEval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
    #     except Exception as e:
    #         print('ERROR: pycocotools unable to run: %s' % e)
    #
    # # Return results
    # model.float()  # for training
    # maps = np.zeros(nc) + map
    # for i, c in enumerate(ap_class):
    #     maps[c] = ap[i]
    # return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t


if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--data', type=str, default='data/coco.yaml', help='*.data path')
    parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--merge', action='store_true', help='use Merge NMS')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    opt = parser.parse_args()
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.data = check_file(opt.data)  # check file
    print(opt)

    if opt.task in ['val', 'test']:  # run normally
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment,
             opt.verbose)
    elif opt.task == 'study':  # run over a range of settings and save/plot
        for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
            f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem)  # filename to save to
            x = list(range(320, 800, 64))  # x axis
            y = []  # y axis
            for i in x:  # img-size
                print('\nRunning %s point %s...' % (f, i))
                r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        # utils.general.plot_study_txt(f, x)  # plot
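
# Usage sketch (hypothetical invocation; assumes this file is saved as sotabench.py
# and a COCO-format dataset is available under DATA_ROOT):
#   $ python sotabench.py --data data/coco.yaml --weights yolov5s.pt --img-size 640
# On the sotabench server the script is simply executed; is_server() then points
# DATA_ROOT at './.data/vision/coco' for the COCOEvaluator.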