# General utils

import glob
import logging
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import math
import numpy as np
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)


def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size
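
# Example (illustrative, not part of the original file): requesting a size that is not a
# multiple of the stride prints a warning and rounds up to the next multiple.
#   check_img_size(636, s=32)  # -> 640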


def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
        return files[0]  # return file


def check_dataset(dict):
    # Download dataset if not found locally
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
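
# Example (illustrative, not part of the original file): the value is rounded *up* to the
# nearest multiple of the divisor, e.g. math.ceil(100 / 32) * 32 == 128.
#   make_divisible(100, 32)  # -> 128
#   make_divisible(640, 32)  # -> 640 (already a multiple)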


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)
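
# Example (illustrative, not part of the original file): with nc=2 and labels containing
# three class-0 boxes and one class-1 box, the raw counts are [3, 1], the inverse-frequency
# weights are [1/3, 1], and after normalization the result is [0.25, 0.75], so the rarer
# class receives the larger weight.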


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights
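
# Note (illustrative, not part of the original file): feeding the output of
# labels_to_class_weights() in as class_weights gives each image a weight equal to the sum
# of the weights of the classes it contains, so images with rare classes are sampled more
# often (see the commented random.choices line above for the intended use).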


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y
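
# Example (illustrative, not part of the original file):
#   xyxy2xywh(np.array([[0., 0., 10., 20.]]))  # -> [[5., 10., 10., 20.]] (center x, center y, w, h)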


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
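
# Example (illustrative, not part of the original file): the two conversions are inverses.
#   xywh2xyxy(np.array([[5., 10., 10., 20.]]))  # -> [[0., 0., 10., 20.]]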


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
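
# Worked example (illustrative, not part of the original file): a 1280x720 frame letterboxed
# to 640x640 gives gain = min(640/720, 640/1280) = 0.5 and padding (0, 140) in (x, y), so a
# predicted box (100, 240, 200, 340) maps back to (200, 200, 400, 400) on the original frame.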


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU
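
# Example (illustrative, not part of the original file): two axis-aligned 10x10 boxes that
# overlap in a 5x5 region intersect over 25 units and union over 175, so IoU ~= 0.143.
#   bbox_iou(torch.tensor([0., 0., 10., 10.]), torch.tensor([[5., 5., 15., 15.]]))  # -> tensor([0.1429])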


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
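
# Example (illustrative, not part of the original file): unlike bbox_iou(), this returns the
# full pairwise NxM matrix, which is what the merge-NMS branch in non_max_suppression() below
# relies on.
#   box_iou(torch.tensor([[0., 0., 10., 10.]]),
#           torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]]))  # -> tensor([[1.0000, 0.1429]])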


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
         detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros(0, 6)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
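
# Typical use (illustrative, not part of the original file): run NMS on raw model output and
# take the per-image nx6 detections (xyxy, conf, cls). `model` and `img` are assumed to come
# from the surrounding inference code.
#   pred = model(img)[0]  # shape (batch, num_boxes, 5 + nc)
#   det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]  # detections for image 0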


def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))


def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload


def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path
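
# Example (illustrative, not part of the original file): with exist_ok=False, an existing
# 'runs/exp' directory yields 'runs/exp2' on the first call (the increment starts at 2), then
# 'runs/exp3' once 'runs/exp2' exists; with the default exist_ok=True, an existing path is
# returned unchanged.
#   increment_path('runs/exp', exist_ok=False)  # -> 'runs/exp2' if 'runs/exp' already exists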