import glob
import logging
import math
import os
import platform
import random
import shutil
import subprocess
import time
from contextlib import contextmanager
from copy import copy
from pathlib import Path

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import yaml
from scipy.cluster.vq import kmeans
from scipy.signal import butter, filtfilt
from tqdm import tqdm

from utils.torch_utils import init_torch_seeds, is_parallel

# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.
    """
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()
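# Usage sketch (DDP; illustrative, not part of the original file): let rank 0 prepare a
# shared resource (e.g. a dataset cache) while the other ranks wait at the barrier:
#   with torch_distributed_zero_first(local_rank):
#       dataset = create_dataset(...)  # hypothetical helper; rank 0 runs first, others follow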


def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed=seed)  # torch-specific seeding (imported above); calling init_seeds here would recurse


def get_latest_run(search_dir='./runs'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size
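# e.g. check_img_size(641) prints the warning and returns 672, the next multiple of 32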


def check_anchors(dataset, model, thr=4.0, imgsz=640):
    # Check anchor fit to data, recompute if necessary
    print('\nAnalyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1. / thr).float().mean()  # best possible recall
        return bpr, aat

    bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
    print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
    if bpr < 0.98:  # threshold to recompute
        print('. Attempting to generate improved anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        new_bpr = metric(new_anchors.reshape(-1, 2))[0]
        if new_bpr > bpr:  # replace anchors
            new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
            m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            check_anchor_order(m)
            print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print('Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline


def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # anchor and stride order disagree
        print('Reversing anchor order')
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)


def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        return files[0]  # return first file if multiple found


def check_dataset(dict):
    # Download dataset if not found
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [os.path.abspath(x) for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(os.path.exists(x) for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [*val])
            if s and len(s):  # download script
                print('Attempting autodownload from: %s' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    if platform.system() == 'Darwin':  # avoid macOS python requests certificate error
                        os.system('curl -L %s -o %s' % (s, f))
                    else:
                        torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
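# e.g. make_divisible(100, 32) -> 128, i.e. ceil(100 / 32) * 32 rounds up to the next multiple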


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)
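# e.g. (illustrative) two classes seen 2x and 1x: bincount [2, 1] -> inverse [0.5, 1.0] -> normalized [1/3, 2/3];
# classes never seen are weighted as if seen once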


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class mAPs
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x
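# e.g. coco80_to_coco91_class()[0] -> 1: contiguous class 0 (person) maps to COCO paper category id 1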


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
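# The two conversions are inverses of each other, e.g.:
#   xywh2xyxy(torch.tensor([[30., 45., 40., 70.]]))  # -> [[10., 10., 50., 80.]]
#   xyxy2xywh(torch.tensor([[10., 10., 50., 80.]]))  # -> [[30., 45., 40., 70.]]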


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
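# Typical inference usage (as in detect.py): map boxes from the letterboxed network input
# back to the original frame:
#   det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()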


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:         True positives (nparray, nx1 or nx10).
        conf:       Objectness value from 0-1 (nparray).
        pred_cls:   Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
    return ap
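# Sketch (illustrative toy values): AP is the area under the monotone precision envelope,
# sampled at 101 recall points as in COCO evaluation:
#   ap = compute_ap(np.array([0.0, 0.5, 1.0]), np.array([1.0, 1.0, 0.5]))  # scalar in [0, 1]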


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-12):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2] + eps, box1[3] + eps
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2] + eps, box2[3] + eps
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + eps
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + eps
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + eps
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + eps

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = w1 * h1 + w2 * h2 - inter

    iou = inter / union  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch  # convex area
            return iou - (c_area - union) / c_area  # GIoU
        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            # convex diagonal squared
            c2 = cw ** 2 + ch ** 2
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU

    return iou
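# e.g. (illustrative) identical boxes give IoU ~1.0, while GIoU goes negative for disjoint
# boxes, which keeps the regression gradient informative when boxes do not overlap:
#   bbox_iou(torch.tensor([0., 0., 10., 10.]), torch.tensor([[0., 0., 10., 10.]]))              # ~1.0
#   bbox_iou(torch.tensor([0., 0., 10., 10.]), torch.tensor([[20., 20., 30., 30.]]), GIoU=True)  # < 0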


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)
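# wh_iou treats boxes as if anchored at a common corner, e.g.:
#   wh_iou(torch.tensor([[2., 2.]]), torch.tensor([[2., 2.], [4., 4.]]))  # -> [[1.00, 0.25]]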


class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps
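# e.g. smooth_BCE(eps=0.1) -> (0.95, 0.05): positives target 0.95 instead of 1.0, negatives 0.05 instead of 0.0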


class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()


def compute_loss(p, targets, model):  # predictions, targets, model
    device = targets.device
    lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['cls_pw']])).to(device)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['obj_pw']])).to(device)

    # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # Focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # Losses
    nt = 0  # number of targets
    np = len(p)  # number of outputs
    balance = [4.0, 1.0, 0.4] if np == 3 else [4.0, 1.0, 0.4, 0.1]  # P3-5 or P3-6
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

        n = b.shape[0]  # number of targets
        if n:
            nt += n  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # Regression
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1).to(device)  # predicted box
            giou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).mean()  # giou loss

            # Objectness
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Classification
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn, device=device)  # targets
                t[range(n), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj) * balance[i]  # obj loss

    s = 3 / np  # output count scaling
    lbox *= h['giou'] * s
    lobj *= h['obj'] * s * (1.4 if np == 4 else 1.)
    lcls *= h['cls'] * s
    bs = tobj.shape[0]  # batch size

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()


def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
    ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
    targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

    g = 0.5  # bias
    off = torch.tensor([[0, 0],
                        [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                        # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                        ], device=targets.device).float() * g  # offsets

    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        t = targets * gain
        if nt:
            # Matches
            r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
            t = t[j]  # filter

            # Offsets
            gxy = t[:, 2:4]  # grid xy
            gxi = gain[[2, 3]] - gxy  # inverse
            j, k = ((gxy % 1. < g) & (gxy > 1.)).T
            l, m = ((gxi % 1. < g) & (gxi > 1.)).T
            j = torch.stack((torch.ones_like(j), j, k, l, m))
            t = t.repeat((5, 1, 1))[j]
            offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
        else:
            t = targets[0]
            offsets = 0

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        a = t[:, 6].long()  # anchor indices
        indices.append((b, a, gj, gi))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
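# Typical inference usage (illustrative thresholds, not the function defaults):
#   pred = model(img)[0]
#   pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)  # list of nx6 tensors, one per image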


def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))


def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))


def coco_only_people(path='../coco/labels/train2017/'):  # from utils.general import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)


def crop_images_random(path='../images/', scale=0.50):  # from utils.general import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # apply random color mask
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])


def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.general import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images


def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

    Arguments:
        path: path to dataset *.yaml, or a loaded dataset
        n: number of anchors
        img_size: image size used for training
        thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
        gen: generations to evolve anchors using genetic algorithm

    Return:
        k: kmeans evolved anchors

    Usage:
        from utils.general import *; _ = kmean_anchors()
    """
    thr = 1. / thr

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
              (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print('WARNING: Extremely small objects found. '
              '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels

    # Kmeans calculation
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation prob, sigma
    pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
            if verbose:
                print_results(k)

    return print_results(k)


def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)


def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
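# e.g. fitness(np.array([[0.6, 0.5, 0.4, 0.3]])) -> [0.31]: 0.1 * mAP@0.5 + 0.9 * mAP@0.5:0.95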


def output_to_target(output, width, height):
    # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)


def increment_dir(dir, comment=''):
    # Increments a directory runs/exp1 --> runs/exp2_comment
    n = 0  # number
    dir = str(Path(dir))  # os-agnostic
    d = sorted(glob.glob(dir + '*'))  # directories
    if len(d):
        n = max([int(x[len(dir):x.find('_') if '_' in x else None]) for x in d]) + 1  # increment
    return dir + str(n) + ('_' + comment if comment else '')
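# e.g. (illustrative) with runs/exp0 and runs/exp1 on disk:
#   increment_dir('runs/exp')             # -> 'runs/exp2'
#   increment_dir('runs/exp', 'yolov5s')  # -> 'runs/exp2_yolov5s'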


# Plotting functions ---------------------------------------------------------------------------------------------------
def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])


def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


def plot_wh_methods():  # from utils.general import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='YOLOv3')
    plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
    plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.grid()
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if os.path.isfile(fname):  # do not overwrite
        return None

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()

    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))

    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)


def plot_test_txt():  # from utils.general import *; plot_test()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.general import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(f='study.txt', x=None):  # from utils.general import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in ['study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')

    ax2.grid()
    ax2.set_xlim(0, 30)
    ax2.set_ylim(28, 50)
    ax2.set_yticks(np.arange(30, 55, 5))
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    plt.savefig('study_mAP_latency.png', dpi=300)
    plt.savefig(f.replace('.txt', '.png'), dpi=300)


def plot_labels(labels, save_dir=''):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
    plt.close()


def plot_evolution(yaml_file='runs/evolve/hyp_evolved.yaml'):  # from utils.general import *; plot_evolution()
    # Plot hyperparameter evolution results in evolve.txt
    with open(yaml_file) as f:
        hyp = yaml.load(f, Loader=yaml.FullLoader)
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(10, 10), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(5, 5, i + 1)
        plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)
    print('\nPlot saved as evolve.png')


def plot_results_overlay(start=0, stop=0):  # from utils.general import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_results(start=0, stop=0, bucket='', id=(), labels=(),
                 save_dir=''):  # from utils.general import *; plot_results()
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        # os.system('rm -rf storage.googleapis.com')
        # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
        files = ['results%g.txt' % x for x in id]
        c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
        os.system(c)
    else:
        files = glob.glob(str(Path(save_dir) / 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else Path(f).stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))

    fig.tight_layout()
    ax[1].legend()
    fig.savefig(Path(save_dir) / 'results.png', dpi=200)