TensorRT Conversion Code

# YOLOv5 general utils

import contextlib
import glob
import logging
import math
import os
import platform
import random
import re
import signal
import time
import urllib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output

import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml

from utils.downloads import gsutil_getsize
from utils.metrics import bbox_iou, fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads


class timeout(contextlib.ContextDecorator):
    # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager
    def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
        self.seconds = int(seconds)
        self.timeout_message = timeout_msg
        self.suppress = bool(suppress_timeout_errors)

    def _timeout_handler(self, signum, frame):
        raise TimeoutError(self.timeout_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self._timeout_handler)  # Set handler for SIGALRM
        signal.alarm(self.seconds)  # start countdown for SIGALRM to be raised

    def __exit__(self, exc_type, exc_val, exc_tb):
        signal.alarm(0)  # Cancel SIGALRM if it's scheduled
        if self.suppress and exc_type is TimeoutError:  # Suppress TimeoutError
            return True
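
# Example sketch: `timeout` works as a decorator or a context manager. It relies on
# signal.SIGALRM, so it is POSIX-only and must be used from the main thread.
#     with timeout(5, timeout_msg='operation timed out'):
#         time.sleep(10)  # interrupted after 5 s; the TimeoutError is suppressed by default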


def try_except(func):
    # try-except function. Usage: @try_except decorator
    def handler(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as e:
            print(e)

    return handler
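
# Example sketch: the wrapper prints any exception instead of propagating it, and the
# decorated call always returns None.
#     @try_except
#     def risky():
#         raise ValueError('boom')
#     risky()  # prints 'boom' rather than raising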


def methods(instance):
    # Get class/instance methods
    return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]


def set_logging(rank=-1, verbose=True):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)


def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def is_docker():
    # Is environment a Docker container?
    return Path('/workspace').exists()  # or Path('/.dockerenv').exists()


def is_colab():
    # Is environment a Google Colab instance?
    try:
        import google.colab
        return True
    except Exception:
        return False


def is_pip():
    # Is file in a pip package?
    return 'site-packages' in Path(__file__).absolute().parts


def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


def file_size(file):
    # Return file size in MB
    return Path(file).stat().st_size / 1e6


def check_online():
    # Check internet connectivity
    import socket
    try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
        return True
    except OSError:
        return False


@try_except
def check_git_status():
    # Recommend 'git pull' if code is out of date
    msg = ', for updates see https://github.com/ultralytics/yolov5'
    print(colorstr('github: '), end='')
    assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
    assert not is_docker(), 'skipping check (Docker image)' + msg
    assert check_online(), 'skipping check (offline)' + msg

    cmd = 'git fetch && git config --get remote.origin.url'
    url = check_output(cmd, shell=True, timeout=5).decode().strip()  # git fetch
    url = re.sub(r'\.git$', '', url)  # remove '.git' suffix (str.rstrip('.git') would strip any trailing g/i/t)
    branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
    n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
    if n > 0:
        s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
            f"Use 'git pull' to update or 'git clone {url}' to download latest."
    else:
        s = f'up to date with {url} ✅'
    print(emojis(s))  # emoji-safe


def check_python(minimum='3.6.2'):
    # Check current python version vs. required python version
    check_version(platform.python_version(), minimum, name='Python ')


def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False):
    # Check version vs. required version
    current, minimum = (pkg.parse_version(x) for x in (current, minimum))
    result = (current == minimum) if pinned else (current >= minimum)
    assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'
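
# Example sketch: versions are compared after pkg.parse_version(), so the ordering is
# semantic rather than lexicographic ('1.10' > '1.9').
#     check_version(torch.__version__, '1.7.0', name='torch ')  # assert torch >= 1.7.0
#     check_version('3.6.2', '3.6.2', pinned=True)              # pass only on an exact match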


@try_except
def check_requirements(requirements='requirements.txt', exclude=()):
    # Check installed dependencies meet requirements (pass *.txt file or list of packages)
    prefix = colorstr('red', 'bold', 'requirements:')
    check_python()  # check python version
    if isinstance(requirements, (str, Path)):  # requirements.txt file
        file = Path(requirements)
        assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
        requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
    else:  # list or tuple of packages
        requirements = [x for x in requirements if x not in exclude]

    n = 0  # number of packages updated
    for r in requirements:
        try:
            pkg.require(r)
        except Exception:  # DistributionNotFound or VersionConflict if requirements not met
            print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...")
            try:
                assert check_online(), f"'pip install {r}' skipped (offline)"
                print(check_output(f"pip install '{r}'", shell=True).decode())
                n += 1
            except Exception as e:
                print(f'{prefix} {e}')

    if n:  # if packages updated
        source = file.resolve() if 'file' in locals() else requirements
        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
            f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
        print(emojis(s))


def check_img_size(img_size, s=32, floor=0):
    # Verify img_size is a multiple of stride s
    new_size = max(make_divisible(img_size, int(s)), floor)  # ceil gs-multiple
    if new_size != img_size:
        print(f'WARNING: --img-size {img_size} must be multiple of max stride {s}, updating to {new_size}')
    return new_size
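
# Example sketch: sizes are rounded *up* to the nearest stride multiple.
#     check_img_size(640, s=32)  # -> 640 (already divisible)
#     check_img_size(641, s=32)  # -> 672 with a warning (make_divisible: ceil(641 / 32) * 32)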


def check_imshow():
    # Check if environment supports image displays
    try:
        assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
        assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
        cv2.imshow('test', np.zeros((1, 1, 3)))
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
    except Exception as e:
        print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
        return False


def check_file(file):
    # Search/download file (if necessary) and return path
    file = str(file)  # convert to str()
    if Path(file).is_file() or file == '':  # exists
        return file
    elif file.startswith(('http:/', 'https:/')):  # download
        url = str(Path(file)).replace(':/', '://')  # Pathlib turns :// -> :/
        file = Path(urllib.parse.unquote(file)).name.split('?')[0]  # '%2F' to '/', split https://url.com/file.txt?auth
        print(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, file)
        assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}'  # check
        return file
    else:  # search
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), f'File not found: {file}'  # assert file was found
        assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
        return files[0]  # return file


def check_dataset(data, autodownload=True):
    # Download and/or unzip dataset if not found locally
    # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip

    # Download (optional)
    extract_dir = ''
    if isinstance(data, (str, Path)) and str(data).endswith('.zip'):  # i.e. gs://bucket/dir/coco128.zip
        download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
        data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
        extract_dir, autodownload = data.parent, False

    # Read yaml (optional)
    if isinstance(data, (str, Path)):
        with open(data, encoding='ascii', errors='ignore') as f:
            data = yaml.safe_load(f)  # dictionary

    # Parse yaml
    path = extract_dir or Path(data.get('path') or '')  # optional 'path' default to '.'
    for k in 'train', 'val', 'test':
        if data.get(k):  # prepend path
            data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]

    assert 'nc' in data, "Dataset 'nc' key missing."
    if 'names' not in data:
        data['names'] = [f'class{i}' for i in range(data['nc'])]  # assign class names if missing
    train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')]
    if val:
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and autodownload:  # download script
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    print(f'Downloading {s} ...')
                    torch.hub.download_url_to_file(s, f)
                    root = path.parent if 'path' in data else '..'  # unzip directory i.e. '../'
                    Path(root).mkdir(parents=True, exist_ok=True)  # create root
                    r = os.system(f'unzip -q {f} -d {root} && rm {f}')  # unzip
                elif s.startswith('bash '):  # bash script
                    print(f'Running {s} ...')
                    r = os.system(s)
                else:  # python script
                    r = exec(s, {'yaml': data})  # return None
                print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure'))  # print result
            else:
                raise Exception('Dataset not found.')

    return data  # dictionary


def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
    # Multi-threaded file download and unzip function, used in data.yaml for autodownload
    def download_one(url, dir):
        # Download 1 file
        f = dir / Path(url).name  # filename
        if Path(url).is_file():  # exists in current path
            Path(url).rename(f)  # move to dir
        elif not f.exists():
            print(f'Downloading {url} to {f}...')
            if curl:
                os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -")  # curl download, retry and resume on fail
            else:
                torch.hub.download_url_to_file(url, f, progress=True)  # torch download
        if unzip and f.suffix in ('.zip', '.gz'):
            print(f'Unzipping {f}...')
            if f.suffix == '.zip':
                s = f'unzip -qo {f} -d {dir}'  # unzip -quiet -overwrite
            elif f.suffix == '.gz':
                s = f'tar xfz {f} --directory {f.parent}'  # unzip
            if delete:  # delete zip file after unzip
                s += f' && rm {f}'
            os.system(s)

    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)  # make directory
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multi-threaded
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)
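
# Example sketch: `url` may be a single string/Path or an iterable of URLs; with
# threads > 1 the iterable is consumed by a ThreadPool. Using the dataset URL cited above:
#     download('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip',
#              dir='../datasets', unzip=True, delete=False)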


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor


def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
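
# Example sketch: one_cycle() is meant as a LambdaLR factor; with hypothetical
# `epochs` and final LR fraction `lrf`, lf(0) == 1.0 and lf(epochs) == lrf.
#     epochs, lrf = 300, 0.1
#     lf = one_cycle(1, lrf, epochs)
#     scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)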


def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
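
# Example sketch: the last positional argument is the string; preceding arguments are
# color/style keys. A single argument defaults to bold blue.
#     print(colorstr('hello world'))            # bold blue 'hello world'
#     print(colorstr('red', 'bold', 'error:'))  # bold red 'error:'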


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]; plain int (np.int is deprecated)
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
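
# Example sketch: the two conversions are exact inverses.
#     b = np.array([[10., 20., 50., 100.]])  # xyxy: top-left (10, 20), bottom-right (50, 100)
#     xyxy2xywh(b)             # -> [[30., 60., 40., 80.]] (center x, center y, width, height)
#     xywh2xyxy(xyxy2xywh(b))  # -> [[10., 20., 50., 100.]] (round trip)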


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return y


def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
    if clip:
        clip_coords(x, (h - eps, w - eps))  # warning: inplace clip
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w  # x center
    y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h  # y center
    y[:, 2] = (x[:, 2] - x[:, 0]) / w  # width
    y[:, 3] = (x[:, 3] - x[:, 1]) / h  # height
    return y


def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    # Convert normalized segments into pixel segments, shape (n,2)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * x[:, 0] + padw  # top left x
    y[:, 1] = h * x[:, 1] + padh  # top left y
    return y


def segment2box(segment, width=640, height=640):
    # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy


def segments2boxes(segments):
    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
    boxes = []
    for s in segments:
        x, y = s.T  # segment xy
        boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy
    return xyxy2xywh(np.array(boxes))  # cls, xywh


def resample_segments(segments, n=1000):
    # Up-sample an (n,2) segment
    for i, s in enumerate(segments):
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T  # segment xy
    return segments


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
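
# Example sketch: typical use after letterboxed inference, mapping detections back to
# the original frame (`img` is the padded network input, `im0` the original image;
# both names are hypothetical here).
#     det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()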


def clip_coords(boxes, shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[:, 0].clamp_(0, shape[1])  # x1
        boxes[:, 1].clamp_(0, shape[0])  # y1
        boxes[:, 2].clamp_(0, shape[1])  # x2
        boxes[:, 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=(), max_det=300):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Returns:
        list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = bbox_iou(boxes[i], boxes, x1y1x2y2=False, DIoU=True) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output
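
# Example sketch: `prediction` is the raw (batch, n_boxes, 5 + nc) inference output;
# the result is one (n, 6) [xyxy, conf, cls] tensor per image (`model` and `img` are
# hypothetical names).
#     pred = model(img)[0]
#     det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, max_det=300)[0]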


def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with ema
    for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates':  # keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")


def print_mutation(results, hyp, save_dir, bucket):
    evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
    keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
            'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys())  # [results + hyps]
    keys = tuple(x.strip() for x in keys)
    vals = results + tuple(hyp.values())
    n = len(keys)

    # Download (optional)
    if bucket:
        url = f'gs://{bucket}/evolve.csv'
        if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0):
            os.system(f'gsutil cp {url} {save_dir}')  # download evolve.csv if larger than local

    # Log to evolve.csv
    s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n')  # add header
    with open(evolve_csv, 'a') as f:
        f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')

    # Print to screen
    print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
    print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n')

    # Save yaml
    with open(evolve_yaml, 'w') as f:
        data = pd.read_csv(evolve_csv)
        data = data.rename(columns=lambda x: x.strip())  # strip keys
        i = np.argmax(fitness(data.values[:, :7]))  # index of best generation
        f.write('# YOLOv5 Hyperparameter Evolution Results\n' +
                f'# Best generation: {i}\n' +
                f'# Last generation: {len(data)}\n' +
                '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
                '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
        yaml.safe_dump(hyp, f, sort_keys=False)

    if bucket:
        os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}')  # upload


def apply_classifier(x, model, img, im0):
    # Apply a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('example%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_coords(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    if save:
        cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop)
    return crop
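
# Example sketch: crop one detection row (xyxy order) from the original BGR frame
# (`det` and `im0` are hypothetical names).
#     crop = save_one_box(det[:4], im0, file='runs/crops/result.jpg', BGR=True)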


def increment_path(path, exist_ok=False, sep='', mkdir=False):
    # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        suffix = path.suffix
        path = path.with_suffix('')
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        path = Path(f"{path}{sep}{n}{suffix}")  # update path
    dir = path if path.suffix == '' else path.parent  # directory
    if not dir.exists() and mkdir:
        dir.mkdir(parents=True, exist_ok=True)  # make directory
    return path
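
# Example sketch: repeated runs get numbered suffixes instead of overwriting.
#     increment_path('runs/exp')                 # -> runs/exp2 if runs/exp exists
#     increment_path('runs/exp', exist_ok=True)  # -> runs/exp (reuse existing path)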