import glob
import logging
import math
import os
import platform
import random
import re
import shutil
import subprocess
import time
from contextlib import contextmanager
from copy import copy
from pathlib import Path

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import yaml
from PIL import Image
from scipy.cluster.vq import kmeans
from scipy.signal import butter, filtfilt
from tqdm import tqdm

from utils.google_utils import gsutil_getsize
from utils.torch_utils import is_parallel, init_torch_seeds

# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)

@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.
    """
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()

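# Usage sketch (illustrative, not part of the original file; the dataset call is a
# placeholder): in DDP training, wrap dataset preparation so local rank 0 runs it
# first and the remaining ranks wait at the barrier, then reuse rank 0's cache:
#
#   with torch_distributed_zero_first(local_rank):
#       dataset = create_dataset(...)  # hypothetical helper; rank 0 executes first
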
def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='./runs'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''

def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')

def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size

def check_anchors(dataset, model, thr=4.0, imgsz=640):
    # Check anchor fit to data, recompute if necessary
    print('\nAnalyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1. / thr).float().mean()  # best possible recall
        return bpr, aat

    bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
    print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
    if bpr < 0.98:  # threshold to recompute
        print('. Attempting to generate improved anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        new_bpr = metric(new_anchors.reshape(-1, 2))[0]
        if new_bpr > bpr:  # replace anchors
            new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
            m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            check_anchor_order(m)
            print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print('Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline

def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # anchor order and stride order differ
        print('Reversing anchor order')
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)

def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
        return files[0]  # return file

def check_dataset(dict):
    # Download dataset if not found
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [os.path.abspath(x) for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(os.path.exists(x) for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [*val])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')

def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor

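# Example: make_divisible(100, 32) returns 128, since math.ceil(100 / 32) * 32 = 4 * 32.
# check_img_size() above relies on this to round --img-size up to the next stride multiple.
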
def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class mAPs
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(int), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights

def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x

def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y

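# Sanity-check sketch (illustrative only): the two conversions above are inverses
# for well-formed boxes.
#
#   b = torch.tensor([[10., 20., 50., 80.]])            # xyxy
#   xyxy2xywh(b)                                        # -> [[30., 50., 40., 60.]]
#   torch.allclose(xywh2xyxy(xyxy2xywh(b)), b)          # -> True
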
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2

def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='precision-recall_curve.png'):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:  True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls:  Predicted object classes (nparray).
        target_cls:  True object classes (nparray).
        plot:  Plot precision-recall curve at mAP@0.5
        fname:  Plot filename
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
                if j == 0:
                    py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    if plot:
        py = np.stack(py, axis=1)
        fig, ax = plt.subplots(1, 1, figsize=(5, 5))
        ax.plot(px, py, linewidth=0.5, color='grey')  # plot(recall, precision)
        ax.plot(px, py.mean(1), linewidth=2, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
        ax.set_xlabel('Recall')
        ax.set_ylabel('Precision')
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        plt.legend()
        fig.tight_layout()
        fig.savefig(fname, dpi=200)

    return p, r, ap, f1, unique_classes.astype('int32')

def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = recall  # np.concatenate(([0.], recall, [recall[-1] + 1E-3]))
    mpre = precision  # np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec

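# Worked example (hedged): for recall = np.array([0., 1.]) and precision = np.array([1., 1.]),
# the 'interp' branch interpolates a constant precision of 1 over 101 recall points and
# integrates to ap = 1.0, the expected AP of a perfect detector.
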
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU

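# Example (illustrative): plain IoU of two overlapping unit-offset squares.
#
#   b1 = torch.tensor([0., 0., 2., 2.])                 # single box, xyxy
#   b2 = torch.tensor([[1., 1., 3., 3.]])               # nx4
#   bbox_iou(b1, b2)                                    # ~0.1429 (inter=1, union=7)
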
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)

class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss

def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps

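# Example: smooth_BCE(eps=0.1) returns (0.95, 0.05) as the positive/negative targets.
# compute_loss() below calls it with eps=0.0, i.e. no smoothing (cp=1.0, cn=0.0).
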
class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()

def compute_loss(p, targets, model):  # predictions, targets, model
    device = targets.device
    lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['cls_pw']])).to(device)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['obj_pw']])).to(device)

    # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # Focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # Losses
    nt = 0  # number of targets
    np = len(p)  # number of outputs (note: shadows the numpy alias inside this function)
    balance = [4.0, 1.0, 0.4] if np == 3 else [4.0, 1.0, 0.4, 0.1]  # P3-5 or P3-6
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

        n = b.shape[0]  # number of targets
        if n:
            nt += n  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # Regression
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1).to(device)  # predicted box
            iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
            lbox += (1.0 - iou).mean()  # iou loss

            # Objectness
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

            # Classification
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn, device=device)  # targets
                t[range(n), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj) * balance[i]  # obj loss

    s = 3 / np  # output count scaling
    lbox *= h['box'] * s
    lobj *= h['obj'] * s * (1.4 if np == 4 else 1.)
    lcls *= h['cls'] * s
    bs = tobj.shape[0]  # batch size

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

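# Typical call pattern (sketch; assumes a YOLOv5-style model exposing .hyp, .gr, .nc
# and a Detect() head, as the code above expects):
#
#   pred = model(imgs)                                  # raw multi-scale predictions
#   loss, loss_items = compute_loss(pred, targets.to(device), model)
#   loss.backward()
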
def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
    ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
    targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

    g = 0.5  # bias
    off = torch.tensor([[0, 0],
                        [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                        # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                        ], device=targets.device).float() * g  # offsets

    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        t = targets * gain
        if nt:
            # Matches
            r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
            t = t[j]  # filter

            # Offsets
            gxy = t[:, 2:4]  # grid xy
            gxi = gain[[2, 3]] - gxy  # inverse
            j, k = ((gxy % 1. < g) & (gxy > 1.)).T
            l, m = ((gxi % 1. < g) & (gxi > 1.)).T
            j = torch.stack((torch.ones_like(j), j, k, l, m))
            t = t.repeat((5, 1, 1))[j]
            offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
        else:
            t = targets[0]
            offsets = 0

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        a = t[:, 6].long()  # anchor indices
        indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch

def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
         detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torch.ops.torchvision.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except Exception:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output

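# Inference sketch (assumption: 'model' and 'img' are prepared elsewhere, detect.py-style):
#
#   pred = model(img)[0]                                # (batch, boxes, 5 + num_classes)
#   pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
#   for det in pred:                                    # per image: nx6 (xyxy, conf, cls) or None
#       ...
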
def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))

def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))  # progress


def coco_only_people(path='../coco/labels/train2017/'):  # from utils.general import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)

def crop_images_random(path='../images/', scale=0.50):  # from utils.general import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # crop to the random box and overwrite the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])

def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.general import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images

def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.general import *; _ = kmean_anchors()
    """
    thr = 1. / thr

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
              (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print('WARNING: Extremely small objects found. '
              '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels

    # Kmeans calculation
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
            if verbose:
                print_results(k)

    return print_results(k)

def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload

def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x

def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)

def output_to_target(output, width, height):
    # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)

def increment_dir(dir, comment=''):
    # Increments a directory runs/exp1 --> runs/exp2_comment
    n = 0  # number
    dir = str(Path(dir))  # os-agnostic
    if os.path.isdir(dir):
        stem = ''
        dir += os.sep  # removed by Path
    else:
        stem = Path(dir).stem

    dirs = sorted(glob.glob(dir + '*'))  # directories
    if dirs:
        matches = [re.search(r"%s(\d+)" % stem, d) for d in dirs]
        idxs = [int(m.groups()[0]) for m in matches if m]
        if idxs:
            n = max(idxs) + 1  # increment
    return dir + str(n) + ('_' + comment if comment else '')

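# Example: with runs/exp0 and runs/exp1 on disk, increment_dir('runs/exp') returns
# 'runs/exp2', and increment_dir('runs/exp', 'mosaic') returns 'runs/exp2_mosaic'.
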
# Plotting functions ---------------------------------------------------------------------------------------------------
def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])

def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter

def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)

def plot_wh_methods():  # from utils.general import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='YOLOv3')
    plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
    plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.grid()
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)

def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        r = min(1280. / max(h, w) / ns, 1.0)  # ratio to limit image size
        mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
        # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))  # cv2 save
        Image.fromarray(mosaic).save(fname)  # PIL save
    return mosaic

def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)

def plot_test_txt():  # from utils.general import *; plot_test()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.general import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)

def plot_study_txt(f='study.txt', x=None):  # from utils.general import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in ['study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')

    ax2.grid()
    ax2.set_xlim(0, 30)
    ax2.set_ylim(28, 50)
    ax2.set_yticks(np.arange(30, 55, 5))
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    plt.savefig('study_mAP_latency.png', dpi=300)
    plt.savefig(f.replace('.txt', '.png'), dpi=300)

def plot_labels(labels, save_dir=''):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
    plt.close()

    # seaborn correlogram
    try:
        import seaborn as sns
        import pandas as pd
        x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
        sns.pairplot(x, corner=True, diag_kind='hist', kind='scatter', markers='o',
                     plot_kws=dict(s=3, edgecolor=None, linewidth=1, alpha=0.02),
                     diag_kws=dict(bins=50))
        plt.savefig(Path(save_dir) / 'labels_correlogram.png', dpi=200)
        plt.close()
    except Exception:
        pass

def plot_evolution(yaml_file='data/hyp.finetune.yaml'):  # from utils.general import *; plot_evolution()
    # Plot hyperparameter evolution results in evolve.txt
    with open(yaml_file) as f:
        hyp = yaml.load(f, Loader=yaml.FullLoader)
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)
    print('\nPlot saved as evolve.png')

def plot_results_overlay(start=0, stop=0):  # from utils.general import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)

def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
    # from utils.general import *; plot_results(save_dir='runs/train/exp0')
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        # os.system('rm -rf storage.googleapis.com')
        # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
        files = ['results%g.txt' % x for x in id]
        c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
        os.system(c)
    else:
        files = glob.glob(str(Path(save_dir) / 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
    assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else Path(f).stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=1, markersize=6)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))

    fig.tight_layout()
    ax[1].legend()
    fig.savefig(Path(save_dir) / 'results.png', dpi=200)