You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

471 lines
20KB

  1. # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
  2. """
  3. Plotting utils
  4. """
  5. import math
  6. import os
  7. from copy import copy
  8. from pathlib import Path
  9. import cv2
  10. import matplotlib
  11. import matplotlib.pyplot as plt
  12. import numpy as np
  13. import pandas as pd
  14. import seaborn as sn
  15. import torch
  16. from PIL import Image, ImageDraw, ImageFont
  17. from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese,
  18. try_except, user_config_dir, xywh2xyxy, xyxy2xywh)
  19. from utils.metrics import fitness
# Settings
CONFIG_DIR = user_config_dir()  # Ultralytics settings dir (missing fonts are downloaded here)
RANK = int(os.getenv('RANK', -1))  # DDP process rank; -1 means single-process / not distributed
matplotlib.rc('font', **{'size': 11})  # default font size for all plots
matplotlib.use('Agg')  # non-interactive backend, for writing to files only
  25. class Colors:
  26. # Ultralytics color palette https://ultralytics.com/
  27. def __init__(self):
  28. # hex = matplotlib.colors.TABLEAU_COLORS.values()
  29. hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
  30. '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
  31. self.palette = [self.hex2rgb('#' + c) for c in hex]
  32. self.n = len(self.palette)
  33. def __call__(self, i, bgr=False):
  34. c = self.palette[int(i) % self.n]
  35. return (c[2], c[1], c[0]) if bgr else c
  36. @staticmethod
  37. def hex2rgb(h): # rgb order (PIL)
  38. return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
  39. colors = Colors() # create instance for 'from utils.plots import colors'
def check_font(font='Arial.ttf', size=10):
    """Return a PIL TrueType font, downloading it to CONFIG_DIR if necessary.

    Args:
        font (str | Path): font filename or path, e.g. 'Arial.ttf'.
        size (int): font size in points.

    Returns:
        PIL FreeTypeFont. NOTE(review): if the post-download load raises
        TypeError, only check_requirements() runs and the function implicitly
        returns None — callers on that path receive no font.
    """
    # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
    font = Path(font)
    # Prefer the given path; otherwise look in the Ultralytics config dir
    font = font if font.exists() else (CONFIG_DIR / font.name)
    try:
        # If neither path exists, pass the bare name so PIL searches system font dirs
        return ImageFont.truetype(str(font) if font.exists() else font.name, size)
    except Exception as e:  # download if missing
        url = "https://ultralytics.com/assets/" + font.name
        print(f'Downloading {url} to {font}...')
        torch.hub.download_url_to_file(url, str(font), progress=False)
        try:
            return ImageFont.truetype(str(font), size)
        except TypeError:
            # Older Pillow cannot load the downloaded font; prompt an upgrade
            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374
class Annotator:
    # Runs once at class-definition time: pre-fetch the TTF on the main process only
    if RANK in (-1, 0):
        check_font()  # download TTF if necessary

    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        """Create an annotator for image `im`.

        PIL is forced when `example` contains non-ASCII or Chinese text, since
        cv2.putText cannot render those glyphs.

        Args:
            im: image to draw on (contiguous np.ndarray, or PIL Image when pil=True).
            line_width (int | None): box line width; derived from image size if None.
            font_size (int | None): PIL font size; derived from image size if None.
            font (str): TTF font filename for the PIL backend.
            pil (bool): force the PIL backend instead of cv2.
            example (str): sample label text used to pick the backend/font.
        """
        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
        self.pil = pil or not is_ascii(example) or is_chinese(example)
        if self.pil:  # use PIL
            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
            self.draw = ImageDraw.Draw(self.im)
            # Unicode font is needed for Chinese labels; size scales with image size
            self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
                                   size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
        else:  # use cv2
            self.im = im
        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width

    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # Add one xyxy box to image with label; label is drawn above the box when it fits,
        # otherwise just inside the top edge
        if self.pil or not is_ascii(label):
            self.draw.rectangle(box, width=self.lw, outline=color)  # box
            if label:
                w, h = self.font.getsize(label)  # text width, height
                outside = box[1] - h >= 0  # label fits outside box
                # filled background rectangle behind the label text
                self.draw.rectangle([box[0],
                                     box[1] - h if outside else box[1],
                                     box[0] + w + 1,
                                     box[1] + 1 if outside else box[1] + h + 1], fill=color)
                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
        else:  # cv2
            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
            if label:
                tf = max(self.lw - 1, 1)  # font thickness
                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
                outside = p1[1] - h - 3 >= 0  # label fits outside box
                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
                            thickness=tf, lineType=cv2.LINE_AA)

    def rectangle(self, xy, fill=None, outline=None, width=1):
        # Add rectangle to image (PIL-only)
        self.draw.rectangle(xy, fill, outline, width)

    def text(self, xy, text, txt_color=(255, 255, 255)):
        # Add text to image (PIL-only); xy is the text baseline anchor
        w, h = self.font.getsize(text)  # text width, height
        self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)

    def result(self):
        # Return annotated image as array
        return np.asarray(self.im)
  103. def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
  104. """
  105. x: Features to be visualized
  106. module_type: Module type
  107. stage: Module stage within model
  108. n: Maximum number of feature maps to plot
  109. save_dir: Directory to save results
  110. """
  111. if 'Detect' not in module_type:
  112. batch, channels, height, width = x.shape # batch, channels, height, width
  113. if height > 1 and width > 1:
  114. f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
  115. blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
  116. n = min(n, channels) # number of plots
  117. fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols
  118. ax = ax.ravel()
  119. plt.subplots_adjust(wspace=0.05, hspace=0.05)
  120. for i in range(n):
  121. ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
  122. ax[i].axis('off')
  123. print(f'Saving {f}... ({n}/{channels})')
  124. plt.savefig(f, dpi=300, bbox_inches='tight')
  125. plt.close()
  126. np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save
  127. def hist2d(x, y, n=100):
  128. # 2d histogram used in labels.png and evolve.png
  129. xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
  130. hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
  131. xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
  132. yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
  133. return np.log(hist[xidx, yidx])
  134. def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
  135. from scipy.signal import butter, filtfilt
  136. # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
  137. def butter_lowpass(cutoff, fs, order):
  138. nyq = 0.5 * fs
  139. normal_cutoff = cutoff / nyq
  140. return butter(order, normal_cutoff, btype='low', analog=False)
  141. b, a = butter_lowpass(cutoff, fs, order=order)
  142. return filtfilt(b, a, data) # forward-backward filter
  143. def output_to_target(output):
  144. # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
  145. targets = []
  146. for i, o in enumerate(output):
  147. for *box, conf, cls in o.cpu().numpy():
  148. targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
  149. return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
    """Plot an image grid (mosaic) with labeled boxes and save it to `fname`.

    Args:
        images: batch of images, (bs, 3, h, w), tensor or ndarray; values may be
            normalized to [0, 1] (de-normalized here) or already 0-255.
        targets: rows of [batch_id, class, x, y, w, h(, conf)]; the optional
            7th conf column distinguishes predictions from labels.
        paths: optional per-image file paths, printed as tile captions.
        fname: output image filename.
        names: optional class-index -> name mapping for labels.
        max_size: maximum mosaic edge in pixels (triggers downscale).
        max_subplots: maximum number of image tiles.
    """
    # Plot image grid with labels
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()
    if np.max(images[0]) <= 1:
        images *= 255  # de-normalise (optional)
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Build Image
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, im in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        im = im.transpose(1, 2, 0)  # CHW -> HWC
        mosaic[y:y + h, x:x + w, :] = im

    # Resize (optional)
    scale = max_size / ns / max(h, w)
    if scale < 1:
        h = math.ceil(scale * h)
        w = math.ceil(scale * w)
        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))

    # Annotate
    fs = int((h + w) * ns * 0.01)  # font size
    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
    # NOTE: `i` deliberately leaks from the mosaic loop above — range(i + 1)
    # covers exactly the tiles that were filled
    for i in range(i + 1):
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
        if paths:
            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
        if len(targets) > 0:
            ti = targets[targets[:, 0] == i]  # image targets
            boxes = xywh2xyxy(ti[:, 2:6]).T
            classes = ti[:, 1].astype('int')
            labels = ti.shape[1] == 6  # labels if no conf column
            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale < 1:  # absolute coords need scale if image scales
                    boxes *= scale
            # shift boxes into this tile's position in the mosaic
            boxes[[0, 2]] += x
            boxes[[1, 3]] += y
            for j, box in enumerate(boxes.T.tolist()):
                cls = classes[j]
                color = colors(cls)
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
                    annotator.box_label(box, label, color=color)
    annotator.im.save(fname)  # save
  205. def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
  206. # Plot LR simulating training for full epochs
  207. optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
  208. y = []
  209. for _ in range(epochs):
  210. scheduler.step()
  211. y.append(optimizer.param_groups[0]['lr'])
  212. plt.plot(y, '.-', label='LR')
  213. plt.xlabel('epoch')
  214. plt.ylabel('LR')
  215. plt.grid()
  216. plt.xlim(0, epochs)
  217. plt.ylim(0)
  218. plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
  219. plt.close()
  220. def plot_val_txt(): # from utils.plots import *; plot_val()
  221. # Plot val.txt histograms
  222. x = np.loadtxt('val.txt', dtype=np.float32)
  223. box = xyxy2xywh(x[:, :4])
  224. cx, cy = box[:, 0], box[:, 1]
  225. fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
  226. ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
  227. ax.set_aspect('equal')
  228. plt.savefig('hist2d.png', dpi=300)
  229. fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
  230. ax[0].hist(cx, bins=600)
  231. ax[1].hist(cy, bins=600)
  232. plt.savefig('hist1d.png', dpi=200)
  233. def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
  234. # Plot targets.txt histograms
  235. x = np.loadtxt('targets.txt', dtype=np.float32).T
  236. s = ['x targets', 'y targets', 'width targets', 'height targets']
  237. fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
  238. ax = ax.ravel()
  239. for i in range(4):
  240. ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
  241. ax[i].legend()
  242. ax[i].set_title(s[i])
  243. plt.savefig('targets.jpg', dpi=200)
def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()
    """Plot speed/accuracy study results and save study.png.

    Reads study*.txt files generated by val.py — either the directory of
    `file`, or every study*.txt inside `dir`. Columns used (usecols) are
    P, R, mAP@.5, mAP@.5:.95 and three timing columns; `x` optionally
    overrides the x-axis values.
    """
    # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
    save_dir = Path(file).parent if file else Path(dir)
    plot2 = False  # plot additional results
    if plot2:
        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
    for f in sorted(save_dir.glob('study*.txt')):
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        if plot2:
            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
            for i in range(7):
                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
                ax[i].set_title(s[i])

        # Plot inference time vs mAP up to (and including) the best-mAP point
        j = y[3].argmax() + 1
        ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    # EfficientDet reference curve for comparison
    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
    ax2.grid(alpha=0.2)
    ax2.set_yticks(np.arange(20, 60, 5))
    ax2.set_xlim(0, 57)
    ax2.set_ylim(25, 55)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    f = save_dir / 'study.png'
    print(f'Saving {f}...')
    plt.savefig(f, dpi=300)
@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395
@Timeout(30)  # known issue https://github.com/ultralytics/yolov5/issues/5611
def plot_labels(labels, names=(), save_dir=Path('')):
    """Plot dataset label statistics: a seaborn correlogram and a 2x2 summary
    figure (class histogram, box overlay, xy and wh 2D histograms).

    Args:
        labels: array of rows [class, x, y, w, h].
            NOTE(review): mutated in place below (center/scale for the box
            overlay) — confirm callers do not reuse `labels` afterwards.
        names: class names for the x-axis (shown only when 0 < len < 30).
        save_dir: output directory for labels_correlogram.jpg / labels.jpg.
    """
    # plot dataset labels
    LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])

    # seaborn correlogram
    sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # update colors bug #3195
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(names, rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)

    # rectangles: draw up to 1000 boxes, all re-centered, on a white 2000px canvas
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    # hide all spines on every panel
    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')  # restore the file-writing backend
    plt.close()
  314. def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
  315. # Plot evolve.csv hyp evolution results
  316. evolve_csv = Path(evolve_csv)
  317. data = pd.read_csv(evolve_csv)
  318. keys = [x.strip() for x in data.columns]
  319. x = data.values
  320. f = fitness(x)
  321. j = np.argmax(f) # max fitness index
  322. plt.figure(figsize=(10, 12), tight_layout=True)
  323. matplotlib.rc('font', **{'size': 8})
  324. for i, k in enumerate(keys[7:]):
  325. v = x[:, 7 + i]
  326. mu = v[j] # best single result
  327. plt.subplot(6, 5, i + 1)
  328. plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
  329. plt.plot(mu, f.max(), 'k+', markersize=15)
  330. plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
  331. if i % 5 != 0:
  332. plt.yticks([])
  333. print(f'{k:>15}: {mu:.3g}')
  334. f = evolve_csv.with_suffix('.png') # filename
  335. plt.savefig(f, dpi=200)
  336. plt.close()
  337. print(f'Saved {f}')
  338. def plot_results(file='path/to/results.csv', dir=''):
  339. # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
  340. save_dir = Path(file).parent if file else Path(dir)
  341. fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
  342. ax = ax.ravel()
  343. files = list(save_dir.glob('results*.csv'))
  344. assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
  345. for fi, f in enumerate(files):
  346. try:
  347. data = pd.read_csv(f)
  348. s = [x.strip() for x in data.columns]
  349. x = data.values[:, 0]
  350. for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
  351. y = data.values[:, j]
  352. # y[y == 0] = np.nan # don't show zero values
  353. ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
  354. ax[i].set_title(s[j], fontsize=12)
  355. # if j in [8, 9, 10]: # share train and val loss y axes
  356. # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
  357. except Exception as e:
  358. print(f'Warning: Plotting error for {f}: {e}')
  359. ax[1].legend()
  360. fig.savefig(save_dir / 'results.png', dpi=200)
  361. plt.close()
  362. def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
  363. # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
  364. ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
  365. s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
  366. files = list(Path(save_dir).glob('frames*.txt'))
  367. for fi, f in enumerate(files):
  368. try:
  369. results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
  370. n = results.shape[1] # number of rows
  371. x = np.arange(start, min(stop, n) if stop else n)
  372. results = results[:, x]
  373. t = (results[0] - results[0].min()) # set t0=0s
  374. results[0] = x
  375. for i, a in enumerate(ax):
  376. if i < len(results):
  377. label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
  378. a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
  379. a.set_title(s[i])
  380. a.set_xlabel('time (s)')
  381. # if fi == len(files) - 1:
  382. # a.set_ylim(bottom=0)
  383. for side in ['top', 'right']:
  384. a.spines[side].set_visible(False)
  385. else:
  386. a.remove()
  387. except Exception as e:
  388. print(f'Warning: Plotting error for {f}; {e}')
  389. ax[1].legend()
  390. plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
  391. def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
  392. # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
  393. xyxy = torch.tensor(xyxy).view(-1, 4)
  394. b = xyxy2xywh(xyxy) # boxes
  395. if square:
  396. b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
  397. b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
  398. xyxy = xywh2xyxy(b).long()
  399. clip_coords(xyxy, im.shape)
  400. crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
  401. if save:
  402. file.parent.mkdir(parents=True, exist_ok=True) # make directory
  403. cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop)
  404. return crop