import glob
import math
import os
import random
import shutil
import subprocess
import time
from copy import copy
from pathlib import Path
from sys import platform

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import yaml
from scipy.signal import butter, filtfilt
from tqdm import tqdm

from . import torch_utils, google_utils  # torch_utils, google_utils

# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)

def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)

def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform in ['linux', 'darwin']:
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')

def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    if img_size % s != 0:
        print('WARNING: --img-size %g must be multiple of max stride %g' % (img_size, s))
    return make_divisible(img_size, s)  # nearest gs-multiple

def check_best_possible_recall(dataset, anchors, thr=4.0, imgsz=640):
    # Check best possible recall of dataset with current anchors
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])).float()  # wh
    ratio = wh[:, None] / anchors.view(-1, 2).cpu()[None]  # ratio
    m = torch.max(ratio, 1. / ratio).max(2)[0]  # max ratio
    bpr = (m.min(1)[0] < thr).float().mean()  # best possible recall
    mr = (m < thr).float().mean()  # match ratio
    print(('AutoAnchor labels:' + '%10s' * 6) % ('n', 'mean', 'min', 'max', 'matching', 'recall'))
    print(('                  ' + '%10.4g' * 6) % (wh.shape[0], wh.mean(), wh.min(), wh.max(), mr, bpr))
    assert bpr > 0.9, 'Best possible recall %.3g (BPR) below 0.9 threshold. Training cancelled. ' \
                      'Compute new anchors with utils.utils.kmean_anchors() and update model before training.' % bpr

def check_file(file):
    # Searches for file if not found locally
    if os.path.isfile(file):
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        return files[0]  # return first file if multiple found

def make_divisible(x, divisor):
    # Returns x rounded up to the nearest multiple of divisor
    return math.ceil(x / divisor) * divisor

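# Usage sketch (illustrative values, not executed on import): round a requested image
# size up to the model stride, as check_img_size() above does.
#   make_divisible(190, 32)   # -> 192
#   check_img_size(636, s=32) # warns, then returns 640
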
def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)

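# Usage sketch (illustrative, hypothetical two-class labels): rarer classes get larger weights.
#   labels = [np.array([[0, .5, .5, .1, .1], [0, .3, .3, .1, .1], [1, .6, .6, .2, .2]])]
#   labels_to_class_weights(labels, nc=2)  # -> tensor([0.3333, 0.6667]): class 1 is half as frequent
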
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class mAPs
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(int), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights

def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x

def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y

def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y

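# Usage sketch (illustrative): the two conversions are inverses of each other.
#   b = torch.tensor([[10., 20., 50., 80.]])  # x1, y1, x2, y2
#   xyxy2xywh(b)                              # -> [[30., 50., 40., 60.]] (center x, center y, w, h)
#   xywh2xyxy(xyxy2xywh(b))                   # -> [[10., 20., 50., 80.]] (round trip)
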
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = max(img1_shape) / max(img0_shape)  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

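# Usage sketch (illustrative shapes; det is a hypothetical nx6 detections tensor): map boxes
# predicted on a 640x640 letterboxed input back to a 720x1280 source frame. Shapes are
# (height, width), and the boxes are modified in place as well as returned.
#   det[:, :4] = scale_coords((640, 640), det[:, :4], (720, 1280)).round()
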
def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2

def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:    True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype('int32')

def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
    return ap

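# Usage sketch (illustrative curve for one class / one IoU threshold): AP is the 101-point
# interpolated area under the precision envelope (COCO style), so it always lands in [0, 1].
#   recall = np.array([0.2, 0.6, 1.0])
#   precision = np.array([1.0, 0.8, 0.5])
#   compute_ap(recall, precision)  # -> scalar AP
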
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.t()

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = (w1 * h1 + 1e-16) + w2 * h2 - inter

    iou = inter / union  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + 1e-16  # convex area
            return iou - (c_area - union) / c_area  # GIoU
        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            # convex diagonal squared
            c2 = cw ** 2 + ch ** 2 + 1e-16
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / (1 - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU

    return iou

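# Usage sketch (illustrative boxes): plain IoU of one box against a set, in xyxy format.
#   b1 = torch.tensor([0., 0., 10., 10.])
#   b2 = torch.tensor([[5., 5., 15., 15.]])
#   bbox_iou(b1, b2)             # -> tensor([0.1429]): 25 / (100 + 100 - 25)
#   bbox_iou(b1, b2, GIoU=True)  # GIoU <= IoU, penalized by the enclosing-box slack
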
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)

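# Usage sketch (illustrative): pairwise IoU of N boxes against M boxes gives an NxM matrix,
# which is what the Merge-NMS step in non_max_suppression() below relies on.
#   a = torch.tensor([[0., 0., 10., 10.], [0., 0., 20., 20.]])
#   b = torch.tensor([[5., 5., 15., 15.]])
#   box_iou(a, b)  # -> tensor([[0.1429], [0.2500]]), shape (2, 1)
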
def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)

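# Usage sketch (illustrative): width-height IoU assumes the boxes share a common center, which
# is how anchor shapes are scored against label shapes in kmean_anchors() below.
#   wh_iou(torch.tensor([[10., 10.]]), torch.tensor([[5., 20.]]))  # -> tensor([[0.3333]])
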
class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss

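# Usage sketch (illustrative shapes): wrap the BCE criterion, then call it like any nn.Module
# loss; easy, confidently-classified examples are down-weighted by (1 - p_t) ** gamma.
#   criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
#   loss = criterion(torch.randn(8, 80), torch.randint(0, 2, (8, 80)).float())  # scalar
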
def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps

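# Usage sketch: with eps=0.1 the positive/negative BCE targets become 0.95/0.05 instead of 1/0.
# compute_loss() below calls this to produce its cp/cn class targets.
#   cp, cn = smooth_BCE(eps=0.1)  # -> (0.95, 0.05)
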
class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()

def compute_loss(p, targets, model):  # predictions, targets, model
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # per output
    nt = 0  # targets
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj

        nb = b.shape[0]  # number of targets
        if nb:
            nt += nb  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # GIoU
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss

            # Obj
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Class
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj)  # obj loss

    lbox *= h['giou']
    lobj *= h['obj']
    lcls *= h['cls']
    bs = tobj.shape[0]  # batch size
    if red == 'sum':
        g = 3.0  # loss gain
        lobj *= g / bs
        if nt:
            lcls *= g / nt / model.nc
            lbox *= g / nt

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
        else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain
    off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float()  # overlap offsets
    at = torch.arange(na).view(na, 1).repeat(1, nt)  # anchor tensor, same as .repeat_interleave(nt)

    style = 'rect4'
    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        a, t, offsets = [], targets * gain, 0
        if nt:
            r = t[None, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
            a, t = at[j], t.repeat(na, 1, 1)[j]  # filter

            # overlaps
            gxy = t[:, 2:4]  # grid xy
            z = torch.zeros_like(gxy)
            if style == 'rect2':
                g = 0.2  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g
            elif style == 'rect4':
                g = 0.5  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
                a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        indices.append((b, a, gj, gi))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch

def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, classes=None, agnostic=False):
    """
    Performs Non-Maximum Suppression on inference results

    Returns detections with shape:
        nx6 (x1, y1, x2, y2, conf, cls)
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    fast |= conf_thres > 0.001  # fast mode
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    if fast:
        merge = False
    else:
        merge = True  # merge for best mAP (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output

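# Usage sketch (illustrative, hypothetical model/img variables): prediction is the raw model
# output, shape (batch, boxes, 5 + nc) in xywh format; each output entry is an nx6
# (xyxy, conf, cls) tensor, or None if nothing survives the thresholds.
#   pred = model(img)[0]  # e.g. shape (1, 25200, 85) for an 80-class model
#   det = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5)[0]
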
def strip_optimizer(f='weights/best.pt'):  # from utils.utils import *; strip_optimizer()
    # Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    torch.save(x, f)
    print('Optimizer stripped from %s' % f)

def create_backbone(f='weights/best.pt', s='weights/backbone.pt'):  # from utils.utils import *; create_backbone()
    # create backbone 's' from 'f'
    device = torch.device('cpu')
    x = torch.load(f, map_location=device)
    torch.save(x, s)  # update model if SourceChangeWarning
    x = torch.load(s, map_location=device)

    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    for p in x['model'].parameters():
        p.requires_grad = True
    torch.save(x, s)
    print('%s modified for backbone use and saved as %s' % (f, s))

def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))

def coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)

def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # crop to the random square and overwrite the original file
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])

def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images

def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=(640, 640), thr=0.20, gen=1000):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml
            n: number of anchors
            img_size: (min, max) image size used for multi-scale training (can be same values)
            thr: IoU threshold hyperparameter used for training (0.0 - 1.0)
            gen: generations to evolve anchors using genetic algorithm

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.utils import *; _ = kmean_anchors()
    """
    from utils.datasets import LoadImagesAndLabels

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        iou = wh_iou(wh, torch.Tensor(k))
        max_iou = iou.max(1)[0]
        bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n  # best possible recall, anch > thr

        # thr = 5.0
        # r = wh[:, None] / k[None]
        # ar = torch.max(r, 1. / r).max(2)[0]
        # max_ar = ar.min(1)[0]
        # bpr, aat = (max_ar < thr).float().mean(), (ar < thr).float().mean() * n  # best possible recall, anch > thr

        print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
              (n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    def fitness(k):  # mutation fitness
        iou = wh_iou(wh, torch.Tensor(k))  # iou
        max_iou = iou.max(1)[0]
        return (max_iou * (max_iou > thr).float()).mean()  # product

    # def fitness_ratio(k):  # mutation fitness
    #     # wh(5316,2), k(9,2)
    #     r = wh[:, None] / k[None]
    #     x = torch.max(r, 1. / r).max(2)[0]
    #     m = x.min(1)[0]
    #     return 1. / (m * (m < 5).float()).mean()  # product

    # Get label wh
    wh = []
    with open(path) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    nr = 1 if img_size[0] == img_size[1] else 3  # number augmentation repetitions
    for s, l in zip(dataset.shapes, dataset.labels):
        # wh.append(l[:, 3:5] * (s / s.max()))  # image normalized to letterbox normalized wh
        wh.append(l[:, 3:5] * s)  # image normalized to pixels
    wh = np.concatenate(wh, 0).repeat(nr, axis=0)  # augment 3x
    # wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1))  # normalized to pixels (multi-scale)
    wh = wh[(wh > 2.0).all(1)]  # remove below threshold boxes (< 2 pixels wh)

    # Kmeans calculation
    from scipy.cluster.vq import kmeans
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.Tensor(wh)
    k = print_results(k)

    # # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    for _ in tqdm(range(gen), desc='Evolving anchors'):
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            print_results(k)
    k = print_results(k)

    return k

def print_mutation(hyp, results, bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sort by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt

def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x

def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)

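# Usage sketch (illustrative row of [P, R, mAP@0.5, mAP@0.5:0.95]): fitness is the weighted sum
# 0.1 * mAP@0.5 + 0.9 * mAP@0.5:0.95, so P and R do not contribute directly.
#   fitness(np.array([[0.7, 0.6, 0.5, 0.3]]))  # -> array([0.32])
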
def output_to_target(output, width, height):
    """
    Convert a YOLO model output to target format
    [batch_id, class_id, x, y, w, h, conf]
    """
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)

# Plotting functions ---------------------------------------------------------------------------------------------------
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter

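# Usage sketch (illustrative signal, hypothetical values): zero-phase low-pass smoothing of a
# 1-D series, used below (commented out) to smooth curves in plot_results_overlay().
#   t = np.linspace(0, 1, 50000)
#   noisy = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(t.size)
#   smooth = butter_lowpass_filtfilt(noisy, cutoff=1500, fs=50000)  # same shape as input
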
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)

def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)

def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if os.path.isfile(fname):  # do not overwrite
        return None

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()

    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))

    return mosaic

def plot_lr_scheduler(optimizer, scheduler, epochs=300):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig('LR.png', dpi=200)

def plot_test_txt():  # from utils.utils import *; plot_test_txt()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)

def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)

def plot_study_txt(f='study.txt', x=None):  # from utils.utils import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in ['coco_study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
    ax2.set_xlim(0, 30)
    ax2.set_ylim(25, 50)
    ax2.set_xlabel('GPU Latency (ms)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    ax2.grid()
    plt.savefig('study_mAP_latency.png', dpi=300)
    plt.savefig(f.replace('.txt', '.png'), dpi=200)

def plot_labels(labels):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes

    def hist2d(x, y, n=100):
        xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
        hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
        xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
        yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
        return np.log(hist[xidx, yidx])

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=int(c.max() + 1))
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig('labels.png', dpi=200)

def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(12, 10), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)

def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)

def plot_results(start=0, stop=0, bucket='', id=(), labels=()):  # from utils.utils import *; plot_results()
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        os.system('rm -rf storage.googleapis.com')
        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
    else:
        files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else Path(f).stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except:
            print('Warning: Plotting error for %s, skipping file' % f)

    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)