import glob
import math
import os
import random
import shutil
import subprocess
import time
from copy import copy
from pathlib import Path
from sys import platform

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import yaml
from scipy.signal import butter, filtfilt
from tqdm import tqdm

from . import torch_utils, google_utils  # torch_utils, google_utils

# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)


def get_latest_run(search_dir='./runs'):
    # Get path to most recent 'last.pt' in run dirs
    # (assumes the most recently saved 'last.pt' is the desired weights to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last.pt', recursive=True)
    latest = max(last_list, key=os.path.getctime)
    return latest


def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform in ['linux', 'darwin']:
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    if img_size % s != 0:
        print('WARNING: --img-size %g must be multiple of max stride %g' % (img_size, s))
    return make_divisible(img_size, s)  # nearest gs-multiple


def check_best_possible_recall(dataset, anchors, thr=4.0, imgsz=640):
    # Check best possible recall of dataset with current anchors
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])).float()  # wh
    ratio = wh[:, None] / anchors.view(-1, 2).cpu()[None]  # ratio
    m = torch.max(ratio, 1. / ratio).max(2)[0]  # max ratio
    bpr = (m.min(1)[0] < thr).float().mean()  # best possible recall
    mr = (m < thr).float().mean()  # match ratio
    print(('AutoAnchor labels:' + '%10s' * 6) % ('n', 'mean', 'min', 'max', 'matching', 'recall'))
    print(('                  ' + '%10.4g' * 6) % (wh.shape[0], wh.mean(), wh.min(), wh.max(), mr, bpr))
    assert bpr > 0.9, 'Best possible recall %.3g (BPR) below 0.9 threshold. Training cancelled. ' \
                      'Compute new anchors with utils.utils.kmean_anchors() and update model before training.' % bpr


def check_file(file):
    # Search for file if not found locally
    if os.path.isfile(file):
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        return files[0]  # return first file if multiple found


def make_divisible(x, divisor):
    # Returns x rounded up so it is evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
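

# Illustrative example (not part of the original file): make_divisible(100, 32) == 128,
# i.e. a 100-pixel --img-size is rounded up to the nearest valid stride multiple by check_img_size().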


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class mAPs
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
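

# Round-trip sketch (illustrative only): the two converters invert each other, e.g.
#   xywh2xyxy(torch.tensor([[50., 50., 20., 10.]]))  # -> [[40., 45., 60., 55.]]
#   xyxy2xywh(torch.tensor([[40., 45., 60., 55.]]))  # -> [[50., 50., 20., 10.]]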


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = max(img1_shape) / max(img0_shape)  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp: True positives (nparray, nx1 or nx10).
        conf: Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype('int32')
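

# Usage sketch (hypothetical arrays, not from this file): with tp an (n, 10) array of per-IoU-threshold
# true-positive flags ordered 0.5:0.95 as in test.py, conf the n objectness scores, and pred_cls/target_cls
# the class indices,
#   p, r, ap, f1, classes = ap_per_class(tp, conf, pred_cls, target_cls)
# returns per-class P/R evaluated at conf=0.1 and an (nc, 10) AP matrix whose first column is AP@0.5.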


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall: The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.t()

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = (w1 * h1 + 1e-16) + w2 * h2 - inter

    iou = inter / union  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + 1e-16  # convex area
            return iou - (c_area - union) / c_area  # GIoU
        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            # convex diagonal squared
            c2 = cw ** 2 + ch ** 2 + 1e-16
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / (1 - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU

    return iou
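

# Worked example (illustrative only): two 2x2 boxes offset by half a box width,
#   bbox_iou(torch.tensor([0., 0., 2., 2.]), torch.tensor([[1., 0., 3., 2.]]))
# gives IoU = 2 / 6 ≈ 0.333; with GIoU=True the 3x2 enclosing box equals the union, so GIoU == IoU here.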


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)
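

# Sketch (not in the original file): wh_iou compares shapes as if anchored at a shared corner, e.g.
#   wh_iou(torch.tensor([[4., 4.]]), torch.tensor([[2., 8.]]))
# intersects 2 * 4 = 8 against union 16 + 16 - 8 = 24, i.e. IoU = 1/3.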


class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps
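

# For example (illustrative), smooth_BCE(0.1) returns (0.95, 0.05): positive targets are softened to 0.95
# and negative targets to 0.05 instead of hard 1/0 labels.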


class BCEBlurWithLogitsLoss(nn.Module):
    # BCEwithLogitLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()


def compute_loss(p, targets, model):  # predictions, targets, model
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # per output
    nt = 0  # targets
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj

        nb = b.shape[0]  # number of targets
        if nb:
            nt += nb  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # GIoU
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss

            # Obj
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Class
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj)  # obj loss

    lbox *= h['giou']
    lobj *= h['obj']
    lcls *= h['cls']
    bs = tobj.shape[0]  # batch size
    if red == 'sum':
        g = 3.0  # loss gain
        lobj *= g / bs
        if nt:
            lcls *= g / nt / model.nc
            lbox *= g / nt

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()


def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
        else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain
    off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float()  # overlap offsets
    at = torch.arange(na).view(na, 1).repeat(1, nt)  # anchor tensor, same as .repeat_interleave(nt)

    style = 'rect4'
    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        a, t, offsets = [], targets * gain, 0
        if nt:
            r = t[None, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
            a, t = at[j], t.repeat(na, 1, 1)[j]  # filter

            # overlaps
            gxy = t[:, 2:4]  # grid xy
            z = torch.zeros_like(gxy)
            if style == 'rect2':
                g = 0.2  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g

            elif style == 'rect4':
                g = 0.5  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
                a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        indices.append((b, a, gj, gi))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, classes=None, agnostic=False):
    """
    Performs Non-Maximum Suppression on inference results

    Returns detections with shape:
        nx6 (x1, y1, x2, y2, conf, cls)
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    fast |= conf_thres > 0.001  # fast mode
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    if fast:
        merge = False
    else:
        merge = True  # merge for best mAP (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
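

# Usage sketch (hypothetical tensors): `pred` is the raw model output of shape (batch, n_boxes, 5 + nc) in xywh format;
#   dets = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5, classes=[0])
# returns a list with one (n, 6) tensor of [x1, y1, x2, y2, conf, cls] per image (or None), here keeping class 0 only.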


def strip_optimizer(f='weights/best.pt'):  # from utils.utils import *; strip_optimizer()
    # Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    torch.save(x, f)
    print('Optimizer stripped from %s' % f)


def create_backbone(f='weights/best.pt', s='weights/backbone.pt'):  # from utils.utils import *; create_backbone()
    # create backbone 's' from 'f'
    device = torch.device('cpu')
    x = torch.load(f, map_location=device)
    torch.save(x, s)  # update model if SourceChangeWarning
    x = torch.load(s, map_location=device)

    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    for p in x['model'].parameters():
        p.requires_grad = True
    torch.save(x, s)
    print('%s modified for backbone use and saved as %s' % (f, s))


def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))


def coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)


def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # crop to the random square and overwrite the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])


def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images


def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=(640, 640), thr=0.20, gen=1000):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml
            n: number of anchors
            img_size: (min, max) image size used for multi-scale training (can be same values)
            thr: IoU threshold hyperparameter used for training (0.0 - 1.0)
            gen: generations to evolve anchors using genetic algorithm

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.utils import *; _ = kmean_anchors()
    """
    from utils.datasets import LoadImagesAndLabels

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        iou = wh_iou(wh, torch.Tensor(k))
        max_iou = iou.max(1)[0]
        bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n  # best possible recall, anch > thr

        # thr = 5.0
        # r = wh[:, None] / k[None]
        # ar = torch.max(r, 1. / r).max(2)[0]
        # max_ar = ar.min(1)[0]
        # bpr, aat = (max_ar < thr).float().mean(), (ar < thr).float().mean() * n  # best possible recall, anch > thr

        print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
              (n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    def fitness(k):  # mutation fitness
        iou = wh_iou(wh, torch.Tensor(k))  # iou
        max_iou = iou.max(1)[0]
        return (max_iou * (max_iou > thr).float()).mean()  # product

    # def fitness_ratio(k):  # mutation fitness
    #     # wh(5316,2), k(9,2)
    #     r = wh[:, None] / k[None]
    #     x = torch.max(r, 1. / r).max(2)[0]
    #     m = x.min(1)[0]
    #     return 1. / (m * (m < 5).float()).mean()  # product

    # Get label wh
    wh = []
    with open(path) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    nr = 1 if img_size[0] == img_size[1] else 3  # number augmentation repetitions
    for s, l in zip(dataset.shapes, dataset.labels):
        # wh.append(l[:, 3:5] * (s / s.max()))  # image normalized to letterbox normalized wh
        wh.append(l[:, 3:5] * s)  # image normalized to pixels
    wh = np.concatenate(wh, 0).repeat(nr, axis=0)  # augment 3x
    # wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1))  # normalized to pixels (multi-scale)
    wh = wh[(wh > 2.0).all(1)]  # remove below threshold boxes (< 2 pixels wh)

    # Kmeans calculation
    from scipy.cluster.vq import kmeans
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.Tensor(wh)
    k = print_results(k)

    # # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    for _ in tqdm(range(gen), desc='Evolving anchors'):
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            print_results(k)
    k = print_results(k)

    return k


def print_mutation(hyp, results, bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sort by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt


def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
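

# Example (illustrative values): a row with P=0.6, R=0.5, mAP@0.5=0.4, mAP@0.5:0.95=0.3 scores
#   fitness(np.array([[0.6, 0.5, 0.4, 0.3]]))  # -> 0.1 * 0.4 + 0.9 * 0.3 = 0.31
# so evolution and checkpoint selection weight mAP@0.5:0.95 most heavily.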


def output_to_target(output, width, height):
    """
    Convert a YOLO model output to target format
    [batch_id, class_id, x, y, w, h, conf]
    """
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)


# Plotting functions ---------------------------------------------------------------------------------------------------
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if os.path.isfile(fname):  # do not overwrite
        return None

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()

    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))

    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'LR.png'), dpi=200)


def plot_test_txt():  # from utils.utils import *; plot_test_txt()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(f='study.txt', x=None):  # from utils.utils import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in ['coco_study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
    ax2.set_xlim(0, 30)
    ax2.set_ylim(25, 50)
    ax2.set_xlabel('GPU Latency (ms)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    ax2.grid()
    plt.savefig('study_mAP_latency.png', dpi=300)
    plt.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_labels(labels, save_dir='.'):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes

    def hist2d(x, y, n=100):
        xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
        hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
        xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
        yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
        return np.log(hist[xidx, yidx])

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=int(c.max() + 1))
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig(os.path.join(save_dir, 'labels.png'), dpi=200)


def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(12, 10), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)


def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir='.'):  # from utils.utils import *; plot_results()
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        os.system('rm -rf storage.googleapis.com')
        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
    else:
        files = glob.glob(os.path.join(save_dir, 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else Path(f).stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except:
            print('Warning: Plotting error for %s, skipping file' % f)

    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)