# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Plotting utils
"""

import math
import os
from copy import copy
from pathlib import Path
from urllib.error import URLError

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont

from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords,
                           increment_path, is_ascii, try_except, xywh2xyxy, xyxy2xywh)
from utils.metrics import fitness

# Settings
RANK = int(os.getenv('RANK', -1))
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg')  # for writing to files only


class Colors:
    # Ultralytics color palette https://ultralytics.com/
    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
               '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in hex]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


colors = Colors()  # create instance for 'from utils.plots import colors'
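
# Usage sketch (illustrative; the index values below are examples only): the palette cycles by
# class index, and bgr=True returns the tuple in OpenCV channel order.
#   colors(0)            # (255, 56, 56) RGB, for PIL drawing
#   colors(0, bgr=True)  # (56, 56, 255) BGR, for cv2 drawing
#   colors(23)           # same as colors(3), index wraps modulo the 20-colour palette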


def check_pil_font(font=FONT, size=10):
    # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
    font = Path(font)
    font = font if font.exists() else (CONFIG_DIR / font.name)
    try:
        return ImageFont.truetype(str(font) if font.exists() else font.name, size)
    except Exception:  # download if missing
        try:
            check_font(font)
            return ImageFont.truetype(str(font), size)
        except TypeError:
            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374
        except URLError:  # not online
            return ImageFont.load_default()


class Annotator:
    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
        non_ascii = not is_ascii(example)  # non-latin labels, i.e. asian, arabic, cyrillic
        self.pil = pil or non_ascii
        if self.pil:  # use PIL
            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
            self.draw = ImageDraw.Draw(self.im)
            self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font,
                                       size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
        else:  # use cv2
            self.im = im
        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width

    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # Add one xyxy box to image with label
        if self.pil or not is_ascii(label):
            self.draw.rectangle(box, width=self.lw, outline=color)  # box
            if label:
                w, h = self.font.getsize(label)  # text width, height
                outside = box[1] - h >= 0  # label fits outside box
                self.draw.rectangle(
                    (box[0], box[1] - h if outside else box[1], box[0] + w + 1,
                     box[1] + 1 if outside else box[1] + h + 1),
                    fill=color,
                )
                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
        else:  # cv2
            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
            if label:
                tf = max(self.lw - 1, 1)  # font thickness
                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
                outside = p1[1] - h - 3 >= 0  # label fits outside box
                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(self.im,
                            label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                            0,
                            self.lw / 3,
                            txt_color,
                            thickness=tf,
                            lineType=cv2.LINE_AA)

    def rectangle(self, xy, fill=None, outline=None, width=1):
        # Add rectangle to image (PIL-only)
        self.draw.rectangle(xy, fill, outline, width)

    def text(self, xy, text, txt_color=(255, 255, 255)):
        # Add text to image (PIL-only)
        w, h = self.font.getsize(text)  # text width, height
        self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)

    def result(self):
        # Return annotated image as array
        return np.asarray(self.im)
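
# Usage sketch (illustrative; 'image.jpg', the box and the label are placeholder values):
#   im = cv2.imread('image.jpg')  # HWC BGR uint8 array
#   annotator = Annotator(im, line_width=2, example='person')
#   annotator.box_label([50, 60, 200, 220], 'person 0.92', color=colors(0, bgr=True))
#   annotated = annotator.result()  # annotated image as ndarray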


def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
    """
    x:              Features to be visualized
    module_type:    Module type
    stage:          Module stage within model
    n:              Maximum number of feature maps to plot
    save_dir:       Directory to save results
    """
    if 'Detect' not in module_type:
        batch, channels, height, width = x.shape  # batch, channels, height, width
        if height > 1 and width > 1:
            f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png"  # filename

            blocks = torch.chunk(x[0].cpu(), channels, dim=0)  # select batch index 0, block by channels
            n = min(n, channels)  # number of plots
            fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)  # n/8 rows x 8 cols
            ax = ax.ravel()
            plt.subplots_adjust(wspace=0.05, hspace=0.05)
            for i in range(n):
                ax[i].imshow(blocks[i].squeeze())  # cmap='gray'
                ax[i].axis('off')

            LOGGER.info(f'Saving {f}... ({n}/{channels})')
            plt.savefig(f, dpi=300, bbox_inches='tight')
            plt.close()
            np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy())  # npy save
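
# Usage sketch (illustrative; the tensor shape and module string are placeholders). In YOLOv5 this
# is normally called from the model forward pass when the --visualize flag is set:
#   x = torch.zeros(1, 64, 80, 80)  # (batch, channels, height, width) feature map
#   feature_visualization(x, 'models.common.C3', stage=2, n=32, save_dir=Path('runs/detect/exp'))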


def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])


def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    from scipy.signal import butter, filtfilt

    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        return butter(order, normal_cutoff, btype='low', analog=False)

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter
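
# Usage sketch (illustrative; the input array is a placeholder). Smooths a noisy 1-D sequence with
# the zero-phase Butterworth filter above; requires scipy:
#   y = np.random.randn(1000).cumsum()
#   y_smooth = butter_lowpass_filtfilt(y, cutoff=1500, fs=50000, order=5)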


def output_to_target(output):
    # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
    targets = []
    for i, o in enumerate(output):
        for *box, conf, cls in o.cpu().numpy():
            targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
    return np.array(targets)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
    # Plot image grid with labels
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()
    if np.max(images[0]) <= 1:
        images *= 255  # de-normalise (optional)
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Build Image
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, im in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        im = im.transpose(1, 2, 0)
        mosaic[y:y + h, x:x + w, :] = im

    # Resize (optional)
    scale = max_size / ns / max(h, w)
    if scale < 1:
        h = math.ceil(scale * h)
        w = math.ceil(scale * w)
        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))

    # Annotate
    fs = int((h + w) * ns * 0.01)  # font size
    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
    for i in range(i + 1):
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
        if paths:
            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
        if len(targets) > 0:
            ti = targets[targets[:, 0] == i]  # image targets
            boxes = xywh2xyxy(ti[:, 2:6]).T
            classes = ti[:, 1].astype('int')
            labels = ti.shape[1] == 6  # labels if no conf column
            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale < 1:  # absolute coords need scale if image scales
                    boxes *= scale
            boxes[[0, 2]] += x
            boxes[[1, 3]] += y
            for j, box in enumerate(boxes.T.tolist()):
                cls = classes[j]
                color = colors(cls)
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
                    annotator.box_label(box, label, color=color)
    annotator.im.save(fname)  # save
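
# Usage sketch (illustrative; tensors, names and filename are placeholders). Plots a batch of
# training labels, or predictions after output_to_target():
#   imgs = torch.rand(4, 3, 640, 640)  # normalised image batch
#   targets = np.array([[0, 0, 0.5, 0.5, 0.2, 0.3]])  # [image_idx, class, x, y, w, h] (normalised)
#   plot_images(imgs, targets, fname='train_batch0.jpg', names=['person'])
#   # for predictions from non_max_suppression(): plot_images(imgs, output_to_target(pred), ...)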


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()
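
# Usage sketch (illustrative; the optimizer, lambda schedule and epoch count are placeholders):
#   opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.01)
#   sch = torch.optim.lr_scheduler.LambdaLR(opt, lambda e: 1 - e / 300)
#   plot_lr_scheduler(opt, sch, epochs=300, save_dir='runs/train/exp')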


def plot_val_txt():  # from utils.plots import *; plot_val()
    # Plot val.txt histograms
    x = np.loadtxt('val.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()
    # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
    save_dir = Path(file).parent if file else Path(dir)
    plot2 = False  # plot additional results
    if plot2:
        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
    for f in sorted(save_dir.glob('study*.txt')):
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        if plot2:
            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
            for i in range(7):
                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
                ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[5, 1:j],
                 y[3, 1:j] * 1E2,
                 '.-',
                 linewidth=2,
                 markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-',
             linewidth=2,
             markersize=8,
             alpha=.25,
             label='EfficientDet')

    ax2.grid(alpha=0.2)
    ax2.set_yticks(np.arange(20, 60, 5))
    ax2.set_xlim(0, 57)
    ax2.set_ylim(25, 55)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    f = save_dir / 'study.png'
    print(f'Saving {f}...')
    plt.savefig(f, dpi=300)


@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395
@Timeout(30)  # known issue https://github.com/ultralytics/yolov5/issues/5611
def plot_labels(labels, names=(), save_dir=Path('')):
    # plot dataset labels
    LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])

    # seaborn correlogram
    sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    try:  # color histogram bars by class
        [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # known issue #3195
    except Exception:
        pass
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(names, rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)

    # rectangles
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()


def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots import *; plot_evolve()
    # Plot evolve.csv hyp evolution results
    evolve_csv = Path(evolve_csv)
    data = pd.read_csv(evolve_csv)
    keys = [x.strip() for x in data.columns]
    x = data.values
    f = fitness(x)
    j = np.argmax(f)  # max fitness index
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    print(f'Best results from row {j} of {evolve_csv}:')
    for i, k in enumerate(keys[7:]):
        v = x[:, 7 + i]
        mu = v[j]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print(f'{k:>15}: {mu:.3g}')
    f = evolve_csv.with_suffix('.png')  # filename
    plt.savefig(f, dpi=200)
    plt.close()
    print(f'Saved {f}')


def plot_results(file='path/to/results.csv', dir=''):
    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
    save_dir = Path(file).parent if file else Path(dir)
    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    ax = ax.ravel()
    files = list(save_dir.glob('results*.csv'))
    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
    for fi, f in enumerate(files):
        try:
            data = pd.read_csv(f)
            s = [x.strip() for x in data.columns]
            x = data.values[:, 0]
            for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
                y = data.values[:, j].astype('float')
                # y[y == 0] = np.nan  # don't show zero values
                ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
                ax[i].set_title(s[j], fontsize=12)
                # if j in [8, 9, 10]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            LOGGER.info(f'Warning: Plotting error for {f}: {e}')
    ax[1].legend()
    fig.savefig(save_dir / 'results.png', dpi=200)
    plt.close()


def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            n = results.shape[1]  # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            t = (results[0] - results[0].min())  # set t0=0s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    # if fi == len(files) - 1:
                    #     a.set_ylim(bottom=0)
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    a.remove()
        except Exception as e:
            print(f'Warning: Plotting error for {f}; {e}')
    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)


def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_coords(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    if save:
        file.parent.mkdir(parents=True, exist_ok=True)  # make directory
        f = str(increment_path(file).with_suffix('.jpg'))
        # cv2.imwrite(f, crop)  # https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue
        Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)).save(f, quality=95, subsampling=0)
    return crop
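
# Usage sketch (illustrative; the image path and box coordinates are placeholders). Crops a
# detection from a BGR image and saves it as a JPEG:
#   im = cv2.imread('image.jpg')
#   crop = save_one_box([50, 60, 200, 220], im, file=Path('runs/detect/exp/crops/person.jpg'))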