# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""

import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile

import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import (check_dataset, check_requirements, check_yaml, clean_str, segments2boxes,
                           xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy)
from utils.torch_utils import torch_distributed_zero_first

# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count())  # number of multiprocessing threads

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def get_hash(paths):
    # Returns a single hash value of a list of paths (files or dirs)
    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # sizes
    h = hashlib.md5(str(size).encode())  # hash sizes
    h.update(''.join(paths).encode())  # hash paths
    return h.hexdigest()  # return hash


def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # rotation 270 or 90
            s = (s[1], s[0])
    except Exception:
        pass
    return s


def exif_transpose(image):
    """
    Transpose a PIL image accordingly if it has an EXIF Orientation tag.
    From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py

    :param image: The image to transpose.
    :return: An image.
    """
    exif = image.getexif()
    orientation = exif.get(0x0112, 1)  # default 1
    if orientation > 1:
        method = {2: Image.FLIP_LEFT_RIGHT,
                  3: Image.ROTATE_180,
                  4: Image.FLIP_TOP_BOTTOM,
                  5: Image.TRANSPOSE,
                  6: Image.ROTATE_270,
                  7: Image.TRANSVERSE,
                  8: Image.ROTATE_90,
                  }.get(orientation)
        if method is not None:
            image = image.transpose(method)
            del exif[0x0112]
            image.info["exif"] = exif.tobytes()
    return image
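
# Usage sketch (not in the original file): normalizing a photo's orientation
# before further processing, assuming a 'photo.jpg' file exists on disk.
#   im = exif_transpose(Image.open('photo.jpg'))  # pixels rotated upright, EXIF Orientation tag cleared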


def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                      rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure only the first process in DDP processes the dataset first, so the following ones can use the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset properties will update during training, else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
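
# Usage sketch (not in the original file): building a training loader, assuming a
# hyperparameter dict 'hyp' loaded from a hyps YAML and an illustrative dataset path.
#   train_loader, dataset = create_dataloader('../datasets/coco128/images/train2017', imgsz=640,
#                                             batch_size=16, stride=32, hyp=hyp, augment=True, prefix='train: ')
#   for imgs, targets, paths, shapes in train_loader:
#       pass  # imgs: uint8 tensor (16, 3, 640, 640); targets: (n, 6) = (image_idx, cls, x, y, w, h)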


class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        for i in range(len(self)):
            yield next(self.iterator)


class _RepeatSampler(object):
    """ Sampler that repeats forever

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)
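
# Note (added commentary, not in the original file): wrapping batch_sampler in
# _RepeatSampler means the parent DataLoader's single iterator never exhausts, so
# its worker processes survive across epochs; InfiniteDataLoader.__iter__ then
# slices one epoch's worth of batches out of that endless stream. A minimal
# standalone sketch, assuming any torch Dataset 'ds':
#   loader = InfiniteDataLoader(ds, batch_size=4, num_workers=2)
#   for epoch in range(3):
#       for batch in loader:  # workers are reused, not respawned each epoch
#           pass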


class LoadImages:
    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
    def __init__(self, path, img_size=640, stride=32, auto=True):
        p = str(Path(path).resolve())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        self.auto = auto
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
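
# Usage sketch (not in the original file): standalone inference-style iteration
# over a folder of images and/or videos; the path is illustrative.
#   dataset = LoadImages('data/images', img_size=640, stride=32)
#   for path, img, img0, cap in dataset:
#       print(path, img.shape)  # img: CHW RGB array for the model, img0: original BGR frame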


class LoadWebcam:  # for inference
    # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride
        self.pipe = int(pipe) if pipe.isnumeric() else pipe  # device index or stream address
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)  # flip left-right

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0


class LoadStreams:
    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.auto = auto
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            s = int(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback

            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=(i, cap, s), daemon=True)
            print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, i, cap, stream):
        # Read stream `i` frames in daemon thread
        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame
        while cap.isOpened() and n < f:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                if success:
                    self.imgs[i] = im
                else:
                    print('WARNING: Video stream unresponsive, please check your IP camera connection.')
                    self.imgs[i] *= 0
                    cap.open(stream)  # re-open stream if signal was lost
            time.sleep(1 / self.fps[i])  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img0 = self.imgs.copy()
        img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
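
# Usage sketch (not in the original file): batched inference over several live
# sources listed one per line in streams.txt (the filename is illustrative).
#   dataset = LoadStreams('streams.txt', img_size=640, stride=32)
#   for sources, img, img0, _ in dataset:
#       print(img.shape)  # (n_streams, 3, H, W), one letterboxed frame per stream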


def img2label_paths(img_paths):
    # Define label paths as a function of image paths
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
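
# Worked example (not in the original file): only the last '/images/' path
# component is swapped for '/labels/' and the suffix becomes .txt, so on a
# POSIX system
#   img2label_paths(['../datasets/coco128/images/train2017/000000000009.jpg'])
# returns
#   ['../datasets/coco128/labels/train2017/000000000009.txt']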


class LoadImagesAndLabels(Dataset):
    # YOLOv5 train_loader/val_loader, loads images and labels for training and validation
    cache_version = 0.5  # dataset labels *.cache version

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        self.albumentations = Albumentations() if augment else None

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        try:
            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
            assert cache['version'] == self.cache_version  # same version
            assert cache['hash'] == get_hash(self.label_files + self.img_files)  # same hash
        except Exception:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
            if cache['msgs']:
                logging.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'

        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs, self.img_npy = [None] * n, [None] * n
        if cache_images:
            if cache_images == 'disk':
                self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
                self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
                self.im_cache_dir.mkdir(parents=True, exist_ok=True)
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                if cache_images == 'disk':
                    if not self.img_npy[i].exists():
                        np.save(self.img_npy[i].as_posix(), x[0])
                    gb += self.img_npy[i].stat().st_size
                else:
                    self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
                    gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
            pbar.close()

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(NUM_THREADS) as pool:
            pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [l, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        pbar.close()
        if msgs:
            logging.info('\n'.join(msgs))
        if nf == 0:
            logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = self.cache_version  # cache version
        try:
            np.save(path, x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
            logging.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # path not writeable
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     # self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp augmentation
            if random.random() < hyp['mixup']:
                img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

            if self.augment:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

        nl = len(labels)  # number of labels
        if nl:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)

        if self.augment:
            # Albumentations
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations

            # HSV color-space
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]

            # Flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]

            # Cutouts
            # labels = cutout(img, labels, p=0.5)

        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear',
                                   align_corners=False)[0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
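
# Note (added commentary, not in the original file): collate_fn concatenates every
# sample's (nl, 6) label tensor into one (N, 6) tensor whose column 0 is the index
# of the source image within the batch, e.g. for a 2-image batch (values illustrative):
#   targets = tensor([[0., 23., 0.77, 0.49, 0.34, 0.28],   # image 0, class 23
#                     [1.,  5., 0.51, 0.52, 0.90, 0.61]])  # image 1, class 5
# collate_fn4 instead merges each group of 4 samples into one: either one image is
# 2x-upsampled, or all four are tiled into a 2x2 grid, with ho/wo/s shifting and
# rescaling the normalized xywh labels to match the new canvas.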


# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
    # loads 1 image from dataset index 'i', returns im, original hw, resized hw
    im = self.imgs[i]
    if im is None:  # not cached in ram
        npy = self.img_npy[i]
        if npy and npy.exists():  # load npy
            im = np.load(npy)
        else:  # read image
            path = self.img_files[i]
            im = cv2.imread(path)  # BGR
            assert im is not None, 'Image Not Found ' + path
        h0, w0 = im.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # ratio
        if r != 1:  # if sizes are not equal
            im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
                            interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
        return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
    else:
        return self.imgs[i], self.img_hw0[i], self.img_hw[i]  # im, hw_original, hw_resized


def load_mosaic(self, index):
    # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    random.shuffle(indices)
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
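
# Worked example (not in the original file): with s = 640 the canvas is 1280x1280
# and (xc, yc) is drawn from [320, 960]. For the top-left tile (i == 0), say
# xc = yc = 800 and the loaded image is 640x640: the canvas slot is
# (x1a, y1a, x2a, y2a) = (160, 160, 800, 800) and the source crop is
# (x1b, y1b, x2b, y2b) = (0, 0, 640, 640), i.e. the whole image fits; padw = padh = 160
# then shifts that image's pixel-space labels onto the canvas.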


def load_mosaic9(self, index):
    # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    random.shuffle(indices)
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9


def create_folder(path='./new'):
    # Create folder
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder


def flatten_recursive(path='../datasets/coco128'):
    # Flatten a recursive directory by bringing all files to top level
    new_path = Path(path + '_flat')
    create_folder(new_path)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)


def extract_boxes(path='../datasets/coco128'):  # from utils.datasets import *; extract_boxes()
    # Convert detection dataset into classification dataset, with one directory per class
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'


def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit()
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list, tuple)
        annotated_only:  Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], [])  # image files only
    n = len(files)  # number of files
    random.seed(0)  # for reproducibility
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path.parent / x).unlink(missing_ok=True) for x in txt]  # remove existing

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for i, img in tqdm(zip(indices, files), total=n):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path.parent / txt[i], 'a') as f:
                f.write('./' + img.relative_to(path.parent).as_posix() + '\n')  # add image to txt file
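
# Usage sketch (not in the original file): an 80/20 train/val split restricted to
# images that already have label files; the path is illustrative.
#   autosplit('../datasets/coco128/images', weights=(0.8, 0.2, 0.0), annotated_only=True)
#   # writes autosplit_train.txt and autosplit_val.txt next to the images directory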


def verify_image_label(args):
    # Verify one image-label pair
    im_file, lb_file, prefix = args
    nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', []  # number (missing, found, empty, corrupt), message, segments
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
        if im.format.lower() in ('jpg', 'jpeg'):
            with open(im_file, 'rb') as f:
                f.seek(-2, 2)
                if f.read() != b'\xff\xd9':  # corrupt JPEG
                    Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100)  # re-save image
                    msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}'

        # verify labels
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file, 'r') as f:
                l = [x.split() for x in f.read().strip().splitlines() if len(x)]
                if any([len(x) > 8 for x in l]):  # is segment
                    classes = np.array([x[0] for x in l], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                    l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                l = np.array(l, dtype=np.float32)
            if len(l):
                assert l.shape[1] == 5, 'labels require 5 columns each'
                assert (l >= 0).all(), 'negative labels'
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
            else:
                ne = 1  # label empty
                l = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            l = np.zeros((0, 5), dtype=np.float32)
        return im_file, l, shape, segments, nm, nf, ne, nc, msg
    except Exception as e:
        nc = 1
        msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
        return [None, None, None, None, nm, nf, ne, nc, msg]


def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
    """ Return dataset statistics dictionary with images and instances counts per split per class
    To run in parent directory: export PYTHONPATH="$PWD/yolov5"
    Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
    Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
    Arguments
        path:           Path to data.yaml or data.zip (with data.yaml inside data.zip)
        autodownload:   Attempt to download dataset if not found locally
        verbose:        Print stats dictionary
    """

    def round_labels(labels):
        # Update labels to integer class and 4 decimal place floats
        return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]

    def unzip(path):
        # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
        if str(path).endswith('.zip'):  # path is data.zip
            assert Path(path).is_file(), f'Error unzipping {path}, file not found'
            ZipFile(path).extractall(path=path.parent)  # unzip
            dir = path.with_suffix('')  # dataset directory == zip name
            return True, str(dir), next(dir.rglob('*.yaml'))  # zipped, data_dir, yaml_path
        else:  # path is data.yaml
            return False, None, path

    def hub_ops(f, max_dim=1920):
        # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
        f_new = im_dir / Path(f).name  # dataset-hub image filename
        try:  # use PIL
            im = Image.open(f)
            r = max_dim / max(im.height, im.width)  # ratio
            if r < 1.0:  # image too large
                im = im.resize((int(im.width * r), int(im.height * r)))
            im.save(f_new, quality=75)  # save
        except Exception as e:  # use OpenCV
            print(f'WARNING: HUB ops PIL failure {f}: {e}')
            im = cv2.imread(f)
            im_height, im_width = im.shape[:2]
            r = max_dim / max(im_height, im_width)  # ratio
            if r < 1.0:  # image too large
                im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR)
            cv2.imwrite(str(f_new), im)

    zipped, data_dir, yaml_path = unzip(Path(path))
    with open(check_yaml(yaml_path), errors='ignore') as f:
        data = yaml.safe_load(f)  # data dict
        if zipped:
            data['path'] = data_dir  # TODO: should this be dir.resolve()?
    check_dataset(data, autodownload)  # download dataset if missing
    hub_dir = Path(data['path'] + ('-hub' if hub else ''))
    stats = {'nc': data['nc'], 'names': data['names']}  # statistics dictionary
    for split in 'train', 'val', 'test':
        if data.get(split) is None:
            stats[split] = None  # i.e. no test set
            continue
        x = []
        dataset = LoadImagesAndLabels(data[split])  # load dataset
        for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
            x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
        x = np.array(x)  # shape(128x80)
        stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
                        'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
                                        'per_class': (x > 0).sum(0).tolist()},
                        'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
                                   zip(dataset.img_files, dataset.labels)]}

        if hub:
            im_dir = hub_dir / 'images'
            im_dir.mkdir(parents=True, exist_ok=True)
            for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
                pass

    # Profile
    stats_path = hub_dir / 'stats.json'
    if profile:
        for _ in range(1):
            file = stats_path.with_suffix('.npy')
            t1 = time.time()
            np.save(file, stats)
            t2 = time.time()
            x = np.load(file, allow_pickle=True)
            print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

            file = stats_path.with_suffix('.json')
            t1 = time.time()
            with open(file, 'w') as f:
                json.dump(stats, f)  # save stats *.json
            t2 = time.time()
            with open(file, 'r') as f:
                x = json.load(f)  # load hyps dict
            print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

    # Save, print and return
    if hub:
        print(f'Saving {stats_path.resolve()}...')
        with open(stats_path, 'w') as f:
            json.dump(stats, f)  # save stats.json

    if verbose:
        print(json.dumps(stats, indent=2, sort_keys=False))
    return stats
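
# Usage sketch (not in the original file): summarize coco128 and print the stats
# dict; assumes the coco128 dataset is available locally or autodownload=True.
#   stats = dataset_stats('coco128.yaml', autodownload=True, verbose=True)
#   print(stats['train']['image_stats']['total'])  # e.g. 128 for coco128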