import glob
import math
import os
import random
import shutil
import subprocess
import time
from copy import copy
from pathlib import Path
from sys import platform

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import yaml
from scipy.signal import butter, filtfilt
from tqdm import tqdm

from . import torch_utils  #  torch_utils, google_utils

# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)


def get_latest_run(search_dir='./runs'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime)


def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform in ['linux', 'darwin']:
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size


def check_anchors(dataset, model, thr=4.0, imgsz=640):
    # Check anchor fit to data, recompute if necessary
    print('\nAnalyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        return (best > 1. / thr).float().mean()  # best possible recall

    bpr = metric(m.anchor_grid.clone().cpu().view(-1, 2))
    print('Best Possible Recall (BPR) = %.4f' % bpr, end='')
    if bpr < 0.99:  # threshold to recompute
        print('. Attempting to generate improved anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        new_bpr = metric(new_anchors.reshape(-1, 2))
        if new_bpr > bpr:  # replace anchors
            new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
            m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            check_anchor_order(m)
            print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print('Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline


def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # same order
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)


def check_file(file):
    # Searches for file if not found locally
    if os.path.isfile(file):
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        return files[0]  # return first file if multiple found


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
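
# Example (illustrative): make_divisible() rounds up to the nearest multiple of the stride,
# which is what check_img_size() relies on. Values below are worked examples, not part of the original file.
# >>> make_divisible(100, 32)    # -> 128 (ceil(100 / 32) * 32)
# >>> check_img_size(416, s=32)  # -> 416 (already a multiple of 32, no warning printed)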


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)
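
# Example (illustrative, with made-up label rows [class, x, y, w, h]): rarer classes get larger weights.
# >>> lbls = [np.array([[0, .5, .5, .1, .1], [0, .2, .3, .1, .1]]), np.array([[1, .5, .5, .2, .2]])]
# >>> labels_to_class_weights(lbls, nc=3)  # class counts [2, 1, 0] -> weights ~[0.2, 0.4, 0.4]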


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class mAPs
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
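
# Example (illustrative): the two conversions are inverses of each other.
# >>> b = torch.tensor([[50., 50., 150., 100.]])  # x1, y1, x2, y2
# >>> xyxy2xywh(b)                 # -> tensor([[100., 75., 100., 50.]])
# >>> xywh2xyxy(xyxy2xywh(b))      # -> back to tensor([[50., 50., 150., 100.]])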


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:    True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
    return ap
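
# Example (illustrative, made-up curves): with perfect precision sustained only up to recall 0.8,
# the 101-point interpolated AP is roughly the maximum recall reached.
# >>> compute_ap(np.array([0.5, 0.8]), np.array([1.0, 1.0]))  # -> ~0.8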


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.t()

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = (w1 * h1 + 1e-16) + w2 * h2 - inter

    iou = inter / union  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + 1e-16  # convex area
            return iou - (c_area - union) / c_area  # GIoU
        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            # convex diagonal squared
            c2 = cw ** 2 + ch ** 2 + 1e-16
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / (1 - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU

    return iou
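
# Example (illustrative, made-up boxes): plain IoU of two partially overlapping boxes.
# >>> a = torch.tensor([0., 0., 100., 100.])      # single box, xyxy
# >>> b = torch.tensor([[50., 50., 150., 150.]])  # nx4 boxes, xyxy
# >>> bbox_iou(a, b)             # -> tensor([0.1429])  (2500 / 17500)
# >>> bbox_iou(a, b, GIoU=True)  # -> ~tensor([-0.0794]); GIoU also penalizes the enclosing-box slack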


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps
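
# Example (illustrative): label-smoothed targets for BCE.
# >>> cp, cn = smooth_BCE(eps=0.1)  # -> (0.95, 0.05): positives trained toward 0.95, negatives toward 0.05
# compute_loss() below calls smooth_BCE(eps=0.0), i.e. hard 1/0 targets by default.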


class BCEBlurWithLogitsLoss(nn.Module):
    # BCEwithLogitLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()


def compute_loss(p, targets, model):  # predictions, targets, model
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # per output
    nt = 0  # number of targets
    np = len(p)  # number of outputs
    balance = [1.0, 1.0, 1.0]
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj

        nb = b.shape[0]  # number of targets
        if nb:
            nt += nb  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # GIoU
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss

            # Obj
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Class
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj) * balance[i]  # obj loss

    s = 3 / np  # output count scaling
    lbox *= h['giou'] * s
    lobj *= h['obj'] * s
    lcls *= h['cls'] * s
    bs = tobj.shape[0]  # batch size
    if red == 'sum':
        g = 3.0  # loss gain
        lobj *= g / bs
        if nt:
            lcls *= g / nt / model.nc
            lbox *= g / nt

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()


def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
        else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain
    off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float()  # overlap offsets
    at = torch.arange(na).view(na, 1).repeat(1, nt)  # anchor tensor, same as .repeat_interleave(nt)

    style = 'rect4'
    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        a, t, offsets = [], targets * gain, 0
        if nt:
            r = t[None, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
            a, t = at[j], t.repeat(na, 1, 1)[j]  # filter

            # overlaps
            g = 0.5  # offset
            gxy = t[:, 2:4]  # grid xy
            z = torch.zeros_like(gxy)
            if style == 'rect2':
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g
            elif style == 'rect4':
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
                a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        indices.append((b, a, gj, gi))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
         detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
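
# Example (illustrative): typical inference-time usage. 'model' and 'img' are assumed to come from the
# detection script; prediction has shape (batch, n, 5 + nc) with xywh boxes, objectness and class scores.
# >>> pred = model(img)[0]
# >>> det = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5)[0]  # first image in the batch
# >>> # det is an nx6 tensor (x1, y1, x2, y2, conf, cls), or None if nothing survives the thresholds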


def strip_optimizer(f='weights/best.pt'):  # from utils.utils import *; strip_optimizer()
    # Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    x['model'].half()  # to FP16
    torch.save(x, f)
    print('Optimizer stripped from %s' % f)


def create_pretrained(f='weights/best.pt', s='weights/pretrained.pt'):  # from utils.utils import *; create_pretrained()
    # create pretrained checkpoint 's' from 'f' (create_pretrained(x, x) for x in glob.glob('./*.pt'))
    device = torch.device('cpu')
    x = torch.load(f, map_location=device)  # load source checkpoint 'f'
    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = True
    torch.save(x, s)
    print('%s saved as pretrained checkpoint %s' % (f, s))


def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))


def coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)


def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # apply random color mask
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])


def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images


def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.utils import *; _ = kmean_anchors()
    """
    thr = 1. / thr

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
              (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print('WARNING: Extremely small objects found. '
              '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels

    # Kmeans calculation
    from scipy.cluster.vq import kmeans
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
            if verbose:
                print_results(k)

    return print_results(k)


def print_mutation(hyp, results, bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sort by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt


def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
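
# Example (illustrative, made-up metrics): fitness() is a weighted sum of the first four result
# columns, dominated by mAP@0.5:0.95.
# >>> fitness(np.array([[0.5, 0.6, 0.55, 0.35]]))  # -> array([0.37])  (0.1*0.55 + 0.9*0.35)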


def output_to_target(output, width, height):
    """
    Convert a YOLO model output to target format
    [batch_id, class_id, x, y, w, h, conf]
    """
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)


# Plotting functions ---------------------------------------------------------------------------------------------------
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
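
# Example (illustrative): draw a labelled box on a blank canvas; the box and label are made up.
# >>> canvas = np.zeros((640, 640, 3), dtype=np.uint8)
# >>> plot_one_box([50, 50, 200, 200], canvas, color=(0, 255, 0), label='person 0.90')  # draws in place
# >>> cv2.imwrite('box.jpg', canvas)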


def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if os.path.isfile(fname):  # do not overwrite
        return None

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()

    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))

    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
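
# Example (illustrative): preview a learning-rate schedule before training. The cosine lambda below is
# only a stand-in for whatever schedule train.py actually builds.
# >>> m = nn.Linear(10, 10)
# >>> opt = torch.optim.SGD(m.parameters(), lr=0.01)
# >>> sch = torch.optim.lr_scheduler.LambdaLR(opt, lambda e: (1 + math.cos(e * math.pi / 300)) / 2)
# >>> plot_lr_scheduler(opt, sch, epochs=300)  # writes LR.png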


def plot_test_txt():  # from utils.utils import *; plot_test()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(f='study.txt', x=None):  # from utils.utils import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in ['coco_study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
    ax2.grid()
    ax2.set_xlim(0, 30)
    ax2.set_ylim(28, 50)
    ax2.set_yticks(np.arange(30, 55, 5))
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    plt.savefig('study_mAP_latency.png', dpi=300)
    plt.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_labels(labels, save_dir=''):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes

    def hist2d(x, y, n=100):
        xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
        hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
        xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
        yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
        return np.log(hist[xidx, yidx])

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=int(c.max() + 1))
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
    plt.close()


def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(12, 10), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)


def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_results(start=0, stop=0, bucket='', id=(), labels=(),
                 save_dir=''):  # from utils.utils import *; plot_results()
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        os.system('rm -rf storage.googleapis.com')
        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
    else:
        files = glob.glob(str(Path(save_dir) / 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else Path(f).stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except:
            print('Warning: Plotting error for %s, skipping file' % f)

    fig.tight_layout()
    ax[1].legend()
    fig.savefig(Path(save_dir) / 'results.png', dpi=200)