
general.py

# General utils

import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import numpy as np
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads

def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)

def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)

def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''

def check_online():
    # Check internet connectivity
    import socket
    try:
        socket.create_connection(("1.1.1.1", 53))  # check host accessibility
        return True
    except OSError:
        return False

def check_git_status():
    # Recommend 'git pull' if code is out of date
    print(colorstr('github: '), end='')
    try:
        assert Path('.git').exists(), 'skipping check (not a git repository)'
        assert not Path('/workspace').exists(), 'skipping check (Docker image)'  # not Path('/.dockerenv').exists()
        assert check_online(), 'skipping check (offline)'

        cmd = 'git fetch && git config --get remote.origin.url'
        url = re.sub(r'\.git$', '', subprocess.check_output(cmd, shell=True).decode().strip())  # github repo url
        branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
        n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
        if n > 0:
            s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
                f"Use 'git pull' to update or 'git clone {url}' to download latest."
        else:
            s = f'up to date with {url} ✅'
        print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)
    except Exception as e:
        print(e)

def check_requirements(file='requirements.txt'):
    # Check installed dependencies meet requirements
    import pkg_resources
    requirements = [f'{x.name}{x.specifier}' for x in pkg_resources.parse_requirements(Path(file).open())]
    pkg_resources.require(requirements)  # DistributionNotFound or VersionConflict exception if requirements not met

def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size

def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
        return files[0]  # return file

def check_dataset(dict):
    # Download dataset if not found locally
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')

def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor

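# Usage sketch (illustrative values, not from the original file): check_img_size()
# relies on this helper to round image sizes up to the next stride multiple.
#   make_divisible(100, 32)  # -> 128, i.e. ceil(100 / 32) * 32
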
def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)

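# Usage sketch (made-up stream URL): every character in the pattern above becomes
# an underscore, e.g. when building safe window or file names.
#   clean_str('rtsp://user@host:554/stream?x=1')  # -> 'rtsp_//user_host_554/stream_x_1'
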
def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

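# Usage sketch ('optimizer' is assumed, not defined in this file): the returned
# lambda is a cosine ramp suitable for LambdaLR learning-rate scheduling.
#   lf = one_cycle(1.0, 0.2, 100)  # lf(0) == 1.0, lf(50) == 0.6, lf(100) == 0.2
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
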
def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']

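# Usage sketch: any number of color/style names may precede the string; a single
# argument defaults to bold blue.
#   colorstr('blue', 'bold', 'hello')  # -> '\033[34m\033[1mhello\033[0m'
#   colorstr('hello')                  # same result via the defaults
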
def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)

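# Worked example (toy labels): rows are [class, x, y, w, h]; class 0 occurs 3x and
# class 1 once, so with nc=2 the inverse frequencies normalize to [0.25, 0.75].
#   labels = [np.array([[0, .5, .5, .1, .1], [0, .2, .2, .1, .1], [1, .3, .3, .1, .1]]),
#             np.array([[0, .4, .4, .1, .1]])]
#   labels_to_class_weights(labels, nc=2)  # -> tensor([0.25, 0.75], dtype=torch.float64)
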
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights

def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x

def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y

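# Round-trip sketch (made-up box): a 20x10 box centered at (50, 45) converts to
# corners and back without loss.
#   b = torch.tensor([[50., 45., 20., 10.]])  # xywh
#   xywh2xyxy(b)                              # -> tensor([[40., 40., 60., 50.]])
#   xyxy2xywh(xywh2xyxy(b))                   # -> tensor([[50., 45., 20., 10.]])
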
def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return y

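# Sketch (assumed values): a normalized label on a 640x640 image with zero
# padding maps to pixel corners.
#   n = torch.tensor([[0.5, 0.5, 0.25, 0.25]])   # normalized xywh
#   xywhn2xyxy(n, w=640, h=640, padw=0, padh=0)  # -> tensor([[240., 240., 400., 400.]])
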
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

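# Sketch (assumed letterbox shapes): a 480x640 image inferred at 640x640 carries
# 80 px of vertical padding, so a box spanning y = 80..560 in network space maps
# back to y = 0..480 in the original image.
#   coords = torch.tensor([[0., 80., 640., 560.]])
#   scale_coords((640, 640), coords, (480, 640))  # -> tensor([[0., 0., 640., 480.]])
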
def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2

def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU

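# Worked example (toy boxes): two 10x10 boxes overlapping in a 5x5 patch give
# IoU = 25 / 175 ≈ 0.143; set GIoU/DIoU/CIoU=True for the penalized variants
# used as regression losses.
#   b1 = torch.tensor([0., 0., 10., 10.])    # shape (4,)
#   b2 = torch.tensor([[5., 5., 15., 15.]])  # shape (n, 4)
#   bbox_iou(b1, b2)                         # -> tensor([0.1429])
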
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)

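# Sketch: unlike bbox_iou() this returns the full pairwise NxM matrix, which
# merge-NMS in non_max_suppression() relies on.
#   a = torch.tensor([[0., 0., 10., 10.]])                      # N = 1
#   b = torch.tensor([[5., 5., 15., 15.], [0., 0., 10., 10.]])  # M = 2
#   box_iou(a, b)                                               # -> tensor([[0.1429, 1.0000]])
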
def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)

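# Sketch (toy anchors): IoU of width-height pairs as if the boxes shared a
# corner, used for anchor matching.
#   wh_iou(torch.tensor([[10., 10.]]), torch.tensor([[5., 5.]]))  # -> tensor([[0.25]])
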
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output

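# Usage sketch ('model' and 'img' are assumed, not defined in this file): raw
# predictions of shape (batch, n, 5 + nc) go in; one (n, 6) tensor of
# (x1, y1, x2, y2, conf, cls) rows per image comes out.
#   pred = model(img)[0]  # (batch, n, 5 + nc)
#   for det in non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45):
#       ...  # det is an (n, 6) tensor for one image
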
def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    for key in 'optimizer', 'training_results', 'wandb_id':
        x[key] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))

def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload

def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x

def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path

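# Sketch (assumed directory layout): with exist_ok=False an existing 'runs/exp'
# increments to 'runs/exp2', then 'runs/exp3', and so on.
#   increment_path('runs/exp', exist_ok=False)  # -> 'runs/exp2' if runs/exp exists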