import glob
import math
import os
import random
import shutil
import subprocess
import time
from copy import copy
from pathlib import Path
from sys import platform

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
from scipy.signal import butter, filtfilt
from tqdm import tqdm

from . import torch_utils, google_utils  # torch_utils, google_utils

# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)


def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform in ['linux', 'darwin']:
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    if img_size % s != 0:
        print('WARNING: --img-size %g must be multiple of max stride %g' % (img_size, s))
    return make_divisible(img_size, s)  # nearest gs-multiple


def check_best_possible_recall(dataset, anchors, thr):
    # Check best possible recall of dataset with current anchors
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(dataset.shapes, dataset.labels)])).float()  # wh
    ratio = wh[:, None] / anchors.view(-1, 2).cpu()[None]  # ratio
    m = torch.max(ratio, 1. / ratio).max(2)[0]  # max ratio
    bpr = (m.min(1)[0] < thr).float().mean()  # best possible recall
    mr = (m < thr).float().mean()  # match ratio
    print(('Label width-height:' + '%10s' * 6) % ('n', 'mean', 'min', 'max', 'matching', 'recall'))
    print(('                   ' + '%10.4g' * 6) % (wh.shape[0], wh.mean(), wh.min(), wh.max(), mr, bpr))
    assert bpr > 0.9, 'Best possible recall %.3g (BPR) below 0.9 threshold. Training cancelled. ' \
                      'Compute new anchors with utils.utils.kmean_anchors() and update model before training.' % bpr


def check_file(file):
    # Searches for file if not found locally
    if os.path.isfile(file):
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        return files[0]  # return first file if multiple found


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
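
# Illustrative usage sketch (added example, not in the original file): rounding an image size up to the
# nearest stride multiple, e.g. for a model whose maximum stride is 32.
# >>> make_divisible(100, 32)    # -> 128
# >>> check_img_size(416, s=32)  # -> 416 (already a multiple, no warning printed)
# >>> check_img_size(500, s=32)  # prints a warning and returns 512
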
def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class mAPs
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
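
# Illustrative round-trip sketch (added example, not in the original file): converting a batch of boxes
# between corner and center formats is lossless up to floating point error.
# >>> b = torch.tensor([[10., 20., 50., 80.]])  # x1, y1, x2, y2
# >>> xyxy2xywh(b)                              # -> tensor([[30., 50., 40., 60.]])
# >>> xywh2xyxy(xyxy2xywh(b))                   # -> tensor([[10., 20., 50., 80.]])
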
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = max(img1_shape) / max(img0_shape)  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip bounding xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:         True positives (nparray, nx1 or nx10).
        conf:       Objectness value from 0-1 (nparray).
        pred_cls:   Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype('int32')
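
def example_ap_per_class():  # illustrative sketch, not part of the original API; from utils.utils import *; example_ap_per_class()
    # Evaluate AP on a tiny synthetic set: 4 predictions of class 0 at a single IoU threshold (tp is nx1),
    # against 3 ground-truth objects of class 0.
    tp = np.array([[1], [1], [0], [1]], dtype=np.float32)  # per-prediction true-positive flags
    conf = np.array([0.9, 0.8, 0.7, 0.6], dtype=np.float32)  # objectness, sorted descending internally
    pred_cls = np.zeros(4)  # all predictions are class 0
    target_cls = np.zeros(3)  # all ground truths are class 0
    p, r, ap, f1, classes = ap_per_class(tp, conf, pred_cls, target_cls)
    print('P %s R %s AP %s F1 %s classes %s' % (p, r, ap, f1, classes))
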
def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap
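
# Illustrative sketch (added example, not in the original file): a detector that is perfect up to 50% recall
# and then finds nothing else scores roughly 0.5 AP under the 101-point interpolation used above.
# >>> compute_ap(recall=np.array([0.25, 0.5]), precision=np.array([1.0, 1.0]))  # ~0.5
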
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.t()

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = (w1 * h1 + 1e-16) + w2 * h2 - inter

    iou = inter / union  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + 1e-16  # convex area
            return iou - (c_area - union) / c_area  # GIoU
        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            # convex diagonal squared
            c2 = cw ** 2 + ch ** 2 + 1e-16
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / (1 - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU

    return iou
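
# Illustrative sketch (added example, not in the original file): plain IoU vs GIoU for one box against two
# candidates. box1 is a flat 4-vector, box2 is nx4, both in xyxy format.
# >>> b1 = torch.tensor([0., 0., 10., 10.])
# >>> b2 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
# >>> bbox_iou(b1, b2)             # -> tensor([1.0000, 0.1429])
# >>> bbox_iou(b1, b2, GIoU=True)  # second value drops below plain IoU (penalised by the enclosing-box area)
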
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)
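
# Illustrative sketch (added example, not in the original file): wh_iou compares shapes as if the boxes
# shared a common top-left corner, which is how anchor shapes are matched against label shapes.
# >>> anchors = torch.tensor([[10., 13.], [33., 23.]])
# >>> labels_wh = torch.tensor([[12., 14.]])
# >>> wh_iou(anchors, labels_wh)  # -> tensor([[0.7738], [0.2213]]), the first anchor fits this label best
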
class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
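
# Illustrative usage sketch (added example, not in the original file): wrapping BCEWithLogitsLoss in
# FocalLoss down-weights easy, well-classified examples relative to hard ones.
# >>> criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
# >>> logits = torch.tensor([3.0, -0.2])  # one easy positive, one hard positive
# >>> labels = torch.tensor([1.0, 1.0])
# >>> criterion(logits, labels)           # the hard example dominates the mean loss
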
def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps
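
# Illustrative sketch (added example, not in the original file): with eps=0.1 the positive target becomes
# 0.95 and the negative 0.05, so the classifier is never pushed to fully saturated 1/0 outputs.
# >>> cp, cn = smooth_BCE(eps=0.1)  # -> (0.95, 0.05)
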
class BCEBlurWithLogitsLoss(nn.Module):
    # BCEwithLogitLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()


def compute_loss(p, targets, model):  # predictions, targets, model
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # per output
    nt = 0  # targets
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj

        nb = b.shape[0]  # number of targets
        if nb:
            nt += nb  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # GIoU
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss

            # Obj
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Class
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj)  # obj loss

    lbox *= h['giou']
    lobj *= h['obj']
    lcls *= h['cls']
    bs = tobj.shape[0]  # batch size
    if red == 'sum':
        g = 3.0  # loss gain
        lobj *= g / bs
        if nt:
            lcls *= g / nt / model.nc
            lbox *= g / nt

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()


def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
        else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain
    off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float()  # overlap offsets
    at = torch.arange(na).view(na, 1).repeat(1, nt)  # anchor tensor, same as .repeat_interleave(nt)

    style = 'rect4'
    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        a, t, offsets = [], targets * gain, 0
        if nt:
            r = t[None, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
            a, t = at[j], t.repeat(na, 1, 1)[j]  # filter

            # overlaps
            gxy = t[:, 2:4]  # grid xy
            z = torch.zeros_like(gxy)
            if style == 'rect2':
                g = 0.2  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g

            elif style == 'rect4':
                g = 0.5  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
                a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        indices.append((b, a, gj, gi))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, classes=None, agnostic=False):
    """
    Performs Non-Maximum Suppression on inference results
    Returns detections with shape:
        nx6 (x1, y1, x2, y2, conf, cls)
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    fast |= conf_thres > 0.001  # fast mode
    if fast:
        merge = False
        multi_label = False
    else:
        merge = True  # merge for best mAP (adds 0.5ms/img)
        multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
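
def example_non_max_suppression():  # illustrative sketch, not part of the original API; from utils.utils import *; example_non_max_suppression()
    # Two heavily overlapping candidate boxes for one object plus one distinct box, single class (nc=1).
    # Each row is [x, y, w, h, objectness, class-0 score]; batch size 1.
    pred = torch.tensor([[[50., 50., 20., 20., 0.90, 0.95],
                          [51., 51., 20., 20., 0.80, 0.90],
                          [200., 200., 40., 40., 0.85, 0.92]]])
    out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.5)
    print(out[0])  # expected: two rows (x1, y1, x2, y2, conf, cls), the near-duplicate suppressed
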
def strip_optimizer(f='weights/best.pt'):  # from utils.utils import *; strip_optimizer()
    # Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    torch.save(x, f)
    print('Optimizer stripped from %s' % f)


def create_backbone(f='weights/best.pt', s='weights/backbone.pt'):  # from utils.utils import *; create_backbone()
    # create backbone 's' from 'f'
    device = torch.device('cpu')
    x = torch.load(f, map_location=device)
    torch.save(x, s)  # update model if SourceChangeWarning
    x = torch.load(s, map_location=device)

    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    for p in x['model'].parameters():
        p.requires_grad = True
    torch.save(x, s)
    print('%s modified for backbone use and saved as %s' % (f, s))


def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))


def coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)


def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # apply random color mask
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])


def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images


def kmean_anchors(path='./data/coco128.txt', n=9, img_size=(640, 640), thr=0.20, gen=1000):
    # Creates kmeans anchors for use in *.cfg files: from utils.utils import *; _ = kmean_anchors()
    # n: number of anchors
    # img_size: (min, max) image size used for multi-scale training (can be same values)
    # thr: IoU threshold hyperparameter used for training (0.0 - 1.0)
    # gen: generations to evolve anchors using genetic algorithm
    from utils.datasets import LoadImagesAndLabels

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        iou = wh_iou(wh, torch.Tensor(k))
        max_iou = iou.max(1)[0]
        bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n  # best possible recall, anch > thr

        # thr = 5.0
        # r = wh[:, None] / k[None]
        # ar = torch.max(r, 1. / r).max(2)[0]
        # max_ar = ar.min(1)[0]
        # bpr, aat = (max_ar < thr).float().mean(), (ar < thr).float().mean() * n  # best possible recall, anch > thr

        print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
              (n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    def fitness(k):  # mutation fitness
        iou = wh_iou(wh, torch.Tensor(k))  # iou
        max_iou = iou.max(1)[0]
        return (max_iou * (max_iou > thr).float()).mean()  # product

    # def fitness_ratio(k):  # mutation fitness
    #     # wh(5316,2), k(9,2)
    #     r = wh[:, None] / k[None]
    #     x = torch.max(r, 1. / r).max(2)[0]
    #     m = x.min(1)[0]
    #     return 1. / (m * (m < 5).float()).mean()  # product

    # Get label wh
    wh = []
    dataset = LoadImagesAndLabels(path, augment=True, rect=True)
    nr = 1 if img_size[0] == img_size[1] else 3  # number augmentation repetitions
    for s, l in zip(dataset.shapes, dataset.labels):
        # wh.append(l[:, 3:5] * (s / s.max()))  # image normalized to letterbox normalized wh
        wh.append(l[:, 3:5] * s)  # image normalized to pixels
    wh = np.concatenate(wh, 0).repeat(nr, axis=0)  # augment 3x
    # wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1))  # normalized to pixels (multi-scale)
    wh = wh[(wh > 2.0).all(1)]  # remove below threshold boxes (< 2 pixels wh)

    # Kmeans calculation
    from scipy.cluster.vq import kmeans
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.Tensor(wh)
    k = print_results(k)

    # # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    for _ in tqdm(range(gen), desc='Evolving anchors'):
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            print_results(k)
    k = print_results(k)

    return k


def print_mutation(hyp, results, bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sort by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt


def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
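
# Illustrative sketch (added example, not in the original file): fitness() is a weighted sum of the first
# four result columns, dominated by mAP@0.5:0.95 (weight 0.9) with a small mAP@0.5 contribution (weight 0.1).
# >>> fitness(np.array([[0.7, 0.6, 0.5, 0.4]]))  # -> array([0.41]) = 0.1*0.5 + 0.9*0.4
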
def output_to_target(output, width, height):
    """
    Convert a YOLO model output to target format
    [batch_id, class_id, x, y, w, h, conf]
    """
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)


# Plotting functions ---------------------------------------------------------------------------------------------------
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter
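
# Illustrative sketch (added example, not in the original file): zero-phase smoothing of a noisy training
# curve before plotting, as used (commented out) in plot_results_overlay() below.
# >>> noisy = np.sin(np.linspace(0, 4 * np.pi, 500)) + 0.3 * np.random.randn(500)
# >>> smooth = butter_lowpass_filtfilt(noisy)
# >>> smooth.shape  # -> (500,) same length, no phase lag (filtfilt runs forward and backward)
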
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
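
# Illustrative sketch (added example, not in the original file): drawing one labelled detection on a blank canvas.
# >>> canvas = np.zeros((480, 640, 3), dtype=np.uint8)
# >>> plot_one_box([100, 100, 300, 240], canvas, color=(0, 255, 0), label='person 0.87')
# >>> cv2.imwrite('box_demo.jpg', canvas)
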
def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if os.path.isfile(fname):  # do not overwrite
        return None

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()

    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))

    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig('LR.png', dpi=200)


def plot_test_txt():  # from utils.utils import *; plot_test()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(f='study.txt', x=None):  # from utils.utils import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in ['coco_study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
    ax2.set_xlim(0, 30)
    ax2.set_ylim(25, 50)
    ax2.set_xlabel('GPU Latency (ms)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    ax2.grid()
    plt.savefig('study_mAP_latency.png', dpi=300)
    plt.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_labels(labels):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes

    def hist2d(x, y, n=100):
        xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
        hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
        xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
        yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
        return np.log(hist[xidx, yidx])

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=int(c.max() + 1))
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig('labels.png', dpi=200)
def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(12, 10), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)


def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=(), labels=()):  # from utils.utils import *; plot_results()
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        os.system('rm -rf storage.googleapis.com')
        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
    else:
        files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else Path(f).stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except:
            print('Warning: Plotting error for %s, skipping file' % f)

    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)