import glob
import math
import os
import random
import shutil
import subprocess
import time
from copy import copy
from pathlib import Path
from sys import platform

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
from scipy.signal import butter, filtfilt
from tqdm import tqdm

from . import torch_utils, google_utils  # torch_utils, google_utils

# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)


def check_git_status():
    if platform in ['linux', 'darwin']:
        # Suggest 'git pull' if repo is out of date
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
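

# Usage sketch (illustrative; the _example_* helpers are not part of the original file):
def _example_make_divisible():
    assert make_divisible(100, 8) == 104  # 100 rounded up to the nearest multiple of 8
    assert make_divisible(64, 32) == 64  # already divisible, returned unchanged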


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)
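

# Usage sketch (illustrative, hypothetical labels): class 0 appears twice and
# class 1 once, so the normalized inverse-frequency weights are ~[1/3, 2/3].
def _example_labels_to_class_weights():
    labels = [np.array([[0, .5, .5, .1, .1], [1, .5, .5, .1, .1]]),
              np.array([[0, .2, .2, .1, .1]])]
    w = labels_to_class_weights(labels, nc=2)
    assert torch.allclose(w.float(), torch.tensor([1 / 3, 2 / 3]))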


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class mAPs
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(int), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weighted image sample
    return image_weights
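

# Usage sketch (illustrative, hypothetical labels and weights): the image that
# contains the higher-weighted class 1 receives the larger sampling weight.
def _example_labels_to_image_weights():
    labels = [np.array([[0, .5, .5, .1, .1]]), np.array([[1, .5, .5, .1, .1]])]
    w = labels_to_image_weights(labels, nc=2, class_weights=np.array([0.25, 0.75]))
    assert w[1] > w[0]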


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x
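

# Sanity check (illustrative): the mapping covers all 80 classes and skips the
# paper ids absent from the 91-class list (12, 26, 29, 30, ...).
def _example_coco80_to_coco91():
    x = coco80_to_coco91_class()
    assert len(x) == 80 and x[0] == 1 and x[-1] == 90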


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
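

# Round-trip sketch (illustrative): the two conversions are exact inverses.
def _example_box_conversions():
    xywh = torch.tensor([[50., 50., 20., 10.]])  # center x, center y, width, height
    xyxy = xywh2xyxy(xywh)
    assert torch.equal(xyxy, torch.tensor([[40., 45., 60., 55.]]))
    assert torch.equal(xyxy2xywh(xyxy), xywh)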


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = max(img1_shape) / max(img0_shape)  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp: True positives (nparray, nx1 or nx10).
        conf: Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score at which to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number of classes, number of IoU thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall: The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap
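

# Sanity check (illustrative): a detector whose precision stays at 1.0 over the
# whole recall range scores an AP of ~1.0 under the 101-point interpolation.
def _example_compute_ap():
    recall, precision = np.linspace(0, 1, 11), np.ones(11)
    assert compute_ap(recall, precision) > 0.98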


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.t()

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = (w1 * h1 + 1e-16) + w2 * h2 - inter

    iou = inter / union  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + 1e-16  # convex area
            return iou - (c_area - union) / c_area  # GIoU
        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            # convex diagonal squared
            c2 = cw ** 2 + ch ** 2 + 1e-16
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / (1 - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU

    return iou
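

# Usage sketch (illustrative): one box against two candidates. The identical
# box gives IoU ~1; the half-offset box shares 25 px^2 of a 175 px^2 union.
def _example_bbox_iou():
    b1 = torch.tensor([0., 0., 10., 10.])
    b2 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    assert torch.allclose(bbox_iou(b1, b2), torch.tensor([1., 25. / 175.]))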


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in box1 and box2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
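

# Usage sketch (illustrative): box_iou returns the full NxM matrix, here 1x2
# with IoU 1 for the identical box and 0 for the disjoint one.
def _example_box_iou():
    a = torch.tensor([[0., 0., 10., 10.]])
    b = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
    m = box_iou(a, b)
    assert m.shape == (1, 2) and m[0, 0] == 1. and m[0, 1] == 0.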


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
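

# Usage sketch (illustrative, hypothetical logits/targets): wrap a BCE-with-logits
# criterion as the class comment suggests; the 'mean' reduction is inherited.
def _example_focal_loss():
    criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    pred = torch.randn(8, 80)  # logits
    true = torch.randint(0, 2, (8, 80)).float()  # binary targets
    return criterion(pred, true)  # scalar loss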


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps
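

# Example (illustrative): eps=0.1 smooths the positive/negative BCE targets
# from 1.0/0.0 to 0.95/0.05.
def _example_smooth_bce():
    cp, cn = smooth_BCE(eps=0.1)
    assert abs(cp - 0.95) < 1e-9 and abs(cn - 0.05) < 1e-9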


def compute_loss(p, targets, model):  # predictions, targets, model
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # per output
    nt = 0  # targets
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj

        nb = b.shape[0]  # number of targets
        if nb:
            nt += nb  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # GIoU
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss

            # Obj
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Class
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj)  # obj loss

    lbox *= h['giou']
    lobj *= h['obj']
    lcls *= h['cls']
    bs = tobj.shape[0]  # batch size
    if red == 'sum':
        g = 3.0  # loss gain
        lobj *= g / bs
        if nt:
            lcls *= g / nt / model.nc
            lbox *= g / nt

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()


def build_targets(p, targets, model):
    # Build targets for compute_loss(); input targets are (image, class, x, y, w, h)
    det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
        else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain
    off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float()  # overlap offsets
    at = torch.arange(na).view(na, 1).repeat(1, nt)  # anchor tensor, same as .repeat_interleave(nt)

    style = 'rect4'
    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        a, t, offsets = [], targets * gain, 0
        if nt:
            r = t[None, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
            a, t = at[j], t.repeat(na, 1, 1)[j]  # filter

            # overlaps
            gxy = t[:, 2:4]  # grid xy
            z = torch.zeros_like(gxy)
            if style == 'rect2':
                g = 0.2  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g
            elif style == 'rect4':
                g = 0.5  # offset
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
                a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        indices.append((b, a, gj, gi))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, multi_label=True, classes=None, agnostic=False):
    """
    Performs Non-Maximum Suppression on inference results

    Returns detections with shape:
        nx6 (x1, y1, x2, y2, conf, cls)
    """

    # Settings
    merge = True  # merge for best mAP
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    time_limit = 10.0  # seconds to quit after

    t = time.time()
    nc = prediction[0].shape[1] - 5  # number of classes
    multi_label &= nc > 1  # multiple labels per box
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        x = x[x[:, 4] > conf_thres]  # confidence
        # x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)]  # width-height

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[..., 5:] *= x[..., 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1)
            x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)[conf > conf_thres]

        # Filter by class
        if classes:
            x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5] * 0 if agnostic else x[:, 5]  # classes
        boxes, scores = x[:, :4].clone() + c.view(-1, 1) * max_wh, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                # i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
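

# Usage sketch (illustrative, hypothetical random predictions): the input is raw
# model output of shape (batch, n, 5 + nc); each output entry is an (m, 6)
# tensor of (x1, y1, x2, y2, conf, cls), or None if nothing survives the filters.
def _example_non_max_suppression():
    pred = torch.rand(1, 100, 85)  # 100 candidate boxes, 80 classes
    pred[..., :4] *= 640  # xywh in pixels
    out = non_max_suppression(pred, conf_thres=0.3, iou_thres=0.5)
    return out[0]  # detections for the first image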


def strip_optimizer(f='weights/best.pt'):  # from utils.utils import *; strip_optimizer()
    # Strip optimizer from *.pt files for lighter checkpoints (file size reduced by ~2/3)
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    torch.save(x, f)
    print('Optimizer stripped from %s' % f)


def create_backbone(f='weights/best.pt', s='weights/backbone.pt'):  # from utils.utils import *; create_backbone()
    # create backbone 's' from 'f'
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    for p in x['model'].parameters():
        p.requires_grad = True
    torch.save(x, s)
    print('%s modified for backbone use and saved as %s' % (f, s))


def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number of classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))  # progress


def coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)


def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # crop and overwrite the image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])


def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images


def kmean_anchors(path='./data/coco128.txt', n=9, img_size=(640, 640), thr=0.20, gen=1000):
    # Creates kmeans anchors for use in *.cfg files: from utils.utils import *; _ = kmean_anchors()
    # n: number of anchors
    # img_size: (min, max) image size used for multi-scale training (can be same values)
    # thr: IoU threshold hyperparameter used for training (0.0 - 1.0)
    # gen: generations to evolve anchors using genetic algorithm
    from utils.datasets import LoadImagesAndLabels

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        iou = wh_iou(wh, torch.Tensor(k))
        max_iou = iou.max(1)[0]
        bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n  # best possible recall, anch > thr
        # thr = 5.0
        # r = wh[:, None] / k[None]
        # ar = torch.max(r, 1. / r).max(2)[0]
        # max_ar = ar.min(1)[0]
        # bpr, aat = (max_ar < thr).float().mean(), (ar < thr).float().mean() * n  # best possible recall, anch > thr
        print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
              (n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    def fitness(k):  # mutation fitness
        iou = wh_iou(wh, torch.Tensor(k))  # iou
        max_iou = iou.max(1)[0]
        return (max_iou * (max_iou > thr).float()).mean()  # product

    # def fitness_ratio(k):  # mutation fitness
    #     # wh(5316,2), k(9,2)
    #     r = wh[:, None] / k[None]
    #     x = torch.max(r, 1. / r).max(2)[0]
    #     m = x.min(1)[0]
    #     return 1. / (m * (m < 5).float()).mean()  # product

    # Get label wh
    wh = []
    dataset = LoadImagesAndLabels(path, augment=True, rect=True)
    nr = 1 if img_size[0] == img_size[1] else 3  # number of augmentation repetitions
    for s, l in zip(dataset.shapes, dataset.labels):
        # wh.append(l[:, 3:5] * (s / s.max()))  # image normalized to letterbox normalized wh
        wh.append(l[:, 3:5] * s)  # image normalized to pixels
    wh = np.concatenate(wh, 0).repeat(nr, axis=0)  # augment 3x
    # wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1))  # normalized to pixels (multi-scale)
    wh = wh[(wh > 2.0).all(1)]  # remove below threshold boxes (< 2 pixels wh)

    # Kmeans calculation
    from scipy.cluster.vq import kmeans
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.Tensor(wh)
    k = print_results(k)

    # # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, shape, mutation probability, sigma
    for _ in tqdm(range(gen), desc='Evolving anchors'):
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            print_results(k)
    k = print_results(k)

    return k


def print_mutation(hyp, results, bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sorted by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt


def apply_classifier(x, model, img, im0):
    # applies a second-stage classifier to YOLO outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
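

# Example (illustrative): with weights [0, 0, 0.1, 0.9] a result row
# [P, R, mAP@0.5, mAP@0.5:0.95] scores 0.1 * 0.5 + 0.9 * 0.4 = 0.41.
def _example_fitness():
    x = np.array([[0.7, 0.6, 0.5, 0.4]])
    assert abs(fitness(x)[0] - 0.41) < 1e-9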


def output_to_target(output, width, height):
    """
    Convert a YOLO model output to target format
    [batch_id, class_id, x, y, w, h, conf]
    """
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)


# Plotting functions ---------------------------------------------------------------------------------------------------
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter
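

# Usage sketch (illustrative, hypothetical signal): smooth a noisy 1-D series
# with the default 1.5 kHz cutoff at 50 kHz sampling; filtfilt runs forward and
# backward, so the result has no phase lag.
def _example_butter_lowpass_filtfilt():
    y = np.sin(np.linspace(0, 4 * np.pi, 1000)) + 0.1 * np.random.randn(1000)
    return butter_lowpass_filtfilt(y)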


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if os.path.isfile(fname):  # do not overwrite
        return None

    if isinstance(images, torch.Tensor):
        images = images.cpu().numpy()

    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))

    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig('LR.png', dpi=200)


def plot_test_txt():  # from utils.utils import *; plot_test_txt()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(f='study.txt', x=None):  # from utils.utils import *; plot_study_txt()
    # Plot study.txt generated by test.py
    y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
    x = np.arange(y.shape[1]) if x is None else np.array(x)
    s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()
    for i in range(7):
        ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
        ax[i].set_title(s[i])
    plt.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_labels(labels):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes

    def hist2d(x, y, n=100):
        xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
        hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
        xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
        yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
        return np.log(hist[xidx, yidx])

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=int(c.max() + 1))
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig('labels.png', dpi=200)


def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(12, 10), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)


def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                # ax[i].plot(x, y, marker='.', label=s[j])
                y_smooth = butter_lowpass_filtfilt(y)
                ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_results(start=0, stop=0, bucket='', id=()):  # from utils.utils import *; plot_results()
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov3#training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        os.system('rm -rf storage.googleapis.com')
        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
    else:
        files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
    for f in sorted(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                ax[i].plot(x, y, marker='.', label=Path(f).stem, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except:
            print('Warning: Plotting error for %s, skipping file' % f)

    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)