# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""

import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile

import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \
    xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy
from utils.torch_utils import torch_distributed_zero_first

# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count())  # number of multiprocessing threads

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def get_hash(paths):
    # Returns a single hash value of a list of paths (files or dirs)
    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # sizes
    h = hashlib.md5(str(size).encode())  # hash sizes
    h.update(''.join(paths).encode())  # hash paths
    return h.hexdigest()  # return hash


def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except Exception:  # image has no usable EXIF data
        pass
    return s


def exif_transpose(image):
    """
    Transpose a PIL image accordingly if it has an EXIF Orientation tag.
    From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py

    :param image: The image to transpose.
    :return: An image.
    """
    exif = image.getexif()
    orientation = exif.get(0x0112, 1)  # default 1
    if orientation > 1:
        method = {2: Image.FLIP_LEFT_RIGHT,
                  3: Image.ROTATE_180,
                  4: Image.FLIP_TOP_BOTTOM,
                  5: Image.TRANSPOSE,
                  6: Image.ROTATE_270,
                  7: Image.TRANSVERSE,
                  8: Image.ROTATE_90,
                  }.get(orientation)
        if method is not None:
            image = image.transpose(method)
            del exif[0x0112]
            image.info["exif"] = exif.tobytes()
    return image
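

# Example (illustrative sketch; 'photo.jpg' is a hypothetical path):
#
#   from PIL import Image
#   im = exif_transpose(Image.open('photo.jpg'))  # pixels rotated upright, Orientation tag cleared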


def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                      rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure only the first process in DDP processes the dataset first, so the others can use its cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset properties will update during training, else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
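

# Example (illustrative sketch; the dataset path is hypothetical and, with augment=True,
# 'hyp' must supply the augmentation keys such as 'mosaic', 'mixup', 'fliplr'):
#
#   train_loader, dataset = create_dataloader('../datasets/coco128/images/train2017', imgsz=640,
#                                             batch_size=16, stride=32, hyp=hyp, augment=True)
#   for imgs, targets, paths, shapes in train_loader:
#       # imgs: uint8 tensor (N, 3, H, W); targets: (M, 6) rows of (batch_idx, cls, x, y, w, h)
#       ...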


class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        for i in range(len(self)):
            yield next(self.iterator)


class _RepeatSampler(object):
    """ Sampler that repeats forever

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)
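

# Because _RepeatSampler never exhausts, InfiniteDataLoader keeps its worker processes
# alive between epochs instead of respawning them, while __len__/__iter__ still bound
# each epoch to one pass over the sampler. Illustrative sketch ('epochs' is a placeholder):
#
#   loader = InfiniteDataLoader(dataset, batch_size=16, num_workers=4)
#   for epoch in range(epochs):
#       for batch in loader:  # workers persist across epochs
#           ...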


class LoadImages:  # for inference
    def __init__(self, path, img_size=640, stride=32, auto=True):
        p = str(Path(path).resolve())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        self.auto = auto
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
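

# Example (illustrative sketch; 'data/images' and 'model' are placeholders):
#
#   dataset = LoadImages('data/images', img_size=640, stride=32)
#   for path, img, im0, cap in dataset:
#       im = torch.from_numpy(img).float() / 255.0  # CHW RGB uint8 -> float in [0, 1]
#       pred = model(im[None])  # add batch dimension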


class LoadWebcam:  # for inference
    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride
        self.pipe = int(pipe) if pipe.isnumeric() else pipe  # local camera index or stream address
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)  # flip left-right

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0


class LoadStreams:  # multiple IP or RTSP cameras
    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.auto = auto
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            s = int(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback

            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=(i, cap), daemon=True)
            print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, i, cap):
        # Read stream `i` frames in daemon thread
        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame
        while cap.isOpened() and n < f:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                self.imgs[i] = im if success else self.imgs[i] * 0
            time.sleep(1 / self.fps[i])  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img0 = self.imgs.copy()
        img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years


def img2label_paths(img_paths):
    # Define label paths as a function of image paths
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
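

# Example (illustrative; POSIX separators shown): labels are expected to mirror the
# images tree, swapping the last '/images/' path component for '/labels/':
#
#   img2label_paths(['coco128/images/train2017/000000000009.jpg'])
#   # -> ['coco128/labels/train2017/000000000009.txt']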


class LoadImagesAndLabels(Dataset):  # for training/testing
    cache_version = 0.5  # dataset labels *.cache version

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        self.albumentations = Albumentations() if augment else None

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        try:
            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
            assert cache['version'] == self.cache_version  # same version
            assert cache['hash'] == get_hash(self.label_files + self.img_files)  # same hash
        except Exception:  # missing, stale, or unreadable cache
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
            if cache['msgs']:
                logging.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'

        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs, self.img_npy = [None] * n, [None] * n
        if cache_images:
            if cache_images == 'disk':
                self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
                self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
                self.im_cache_dir.mkdir(parents=True, exist_ok=True)
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                if cache_images == 'disk':
                    if not self.img_npy[i].exists():
                        np.save(self.img_npy[i].as_posix(), x[0])
                    gb += self.img_npy[i].stat().st_size
                else:
                    self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
                    gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
            pbar.close()

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(NUM_THREADS) as pool:
            pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [l, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"

        pbar.close()
        if msgs:
            logging.info('\n'.join(msgs))
        if nf == 0:
            logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = self.cache_version  # cache version
        try:
            np.save(path, x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
            logging.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # path not writeable
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     # self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp augmentation
            if random.random() < hyp['mixup']:
                img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

            if self.augment:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

        nl = len(labels)  # number of labels
        if nl:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)

        if self.augment:
            # Albumentations
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations

            # HSV color-space
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]

            # Flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]

            # Cutouts
            # labels = cutout(img, labels, p=0.5)

        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
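

# Example (illustrative; assumes the samples share one letterboxed shape, as in square
# training): collate_fn stacks images and concatenates the variable-length label
# tensors, writing each label's source image index into column 0:
#
#   batch = [dataset[i] for i in range(4)]  # 'dataset' is a LoadImagesAndLabels instance
#   imgs, targets, paths, shapes = LoadImagesAndLabels.collate_fn(batch)
#   # imgs: (4, 3, H, W); targets: (total_labels, 6) with targets[:, 0] in {0, 1, 2, 3}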


# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
    # loads 1 image from dataset index 'i', returns im, original hw, resized hw
    im = self.imgs[i]
    if im is None:  # not cached in ram
        npy = self.img_npy[i]
        if npy and npy.exists():  # load npy
            im = np.load(npy)
        else:  # read image
            path = self.img_files[i]
            im = cv2.imread(path)  # BGR
            assert im is not None, 'Image Not Found ' + path
        h0, w0 = im.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # ratio
        if r != 1:  # if sizes are not equal
            im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
                            interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
        return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
    else:
        return self.imgs[i], self.img_hw0[i], self.img_hw[i]  # im, hw_original, hw_resized


def load_mosaic(self, index):
    # loads images in a 4-mosaic
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    random.shuffle(indices)
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4


def load_mosaic9(self, index):
    # loads images in a 9-mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    random.shuffle(indices)
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9


def create_folder(path='./new'):
    # Create folder
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder


def flatten_recursive(path='../datasets/coco128'):
    # Flatten a recursive directory by bringing all files to top level
    new_path = Path(path + '_flat')
    create_folder(new_path)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)


def extract_boxes(path='../datasets/coco128'):  # from utils.datasets import *; extract_boxes()
    # Convert detection dataset into classification dataset, with one directory per class
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'


def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit()
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list, tuple)
        annotated_only:  Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], [])  # image files only
    n = len(files)  # number of files
    random.seed(0)  # for reproducibility
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path.parent / x).unlink(missing_ok=True) for x in txt]  # remove existing

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for i, img in tqdm(zip(indices, files), total=n):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path.parent / txt[i], 'a') as f:
                f.write('./' + img.relative_to(path.parent).as_posix() + '\n')  # add image to txt file


def verify_image_label(args):
    # Verify one image-label pair
    im_file, lb_file, prefix = args
    nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', []  # number (missing, found, empty, corrupt), message, segments
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
        if im.format.lower() in ('jpg', 'jpeg'):
            with open(im_file, 'rb') as f:
                f.seek(-2, 2)
                if f.read() != b'\xff\xd9':  # corrupt JPEG
                    Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100)  # re-save image
                    msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}'

        # verify labels
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file, 'r') as f:
                l = [x.split() for x in f.read().strip().splitlines() if len(x)]
                if any([len(x) > 8 for x in l]):  # is segment
                    classes = np.array([x[0] for x in l], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                    l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                l = np.array(l, dtype=np.float32)
            if len(l):
                assert l.shape[1] == 5, 'labels require 5 columns each'
                assert (l >= 0).all(), 'negative labels'
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
            else:
                ne = 1  # label empty
                l = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            l = np.zeros((0, 5), dtype=np.float32)
        return im_file, l, shape, segments, nm, nf, ne, nc, msg
    except Exception as e:
        nc = 1
        msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
        return [None, None, None, None, nm, nf, ne, nc, msg]
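

# Example (illustrative; the values below are made up): a YOLO label *.txt file holds
# one object per line as 'class x_center y_center width height', normalized to [0, 1]:
#
#   45 0.479492 0.688771 0.955609 0.595500
#   50 0.637063 0.732938 0.494125 0.510583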


def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
    """ Return dataset statistics dictionary with images and instances counts per split per class
    To run in parent directory: export PYTHONPATH="$PWD/yolov5"
    Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
    Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
    Arguments
        path:           Path to data.yaml or data.zip (with data.yaml inside data.zip)
        autodownload:   Attempt to download dataset if not found locally
        verbose:        Print stats dictionary
    """

    def round_labels(labels):
        # Update labels to integer class and 4-decimal-place floats
        return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]

    def unzip(path):
        # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
        if str(path).endswith('.zip'):  # path is data.zip
            assert Path(path).is_file(), f'Error unzipping {path}, file not found'
            ZipFile(path).extractall(path=path.parent)  # unzip
            dir = path.with_suffix('')  # dataset directory == zip name
            return True, str(dir), next(dir.rglob('*.yaml'))  # zipped, data_dir, yaml_path
        else:  # path is data.yaml
            return False, None, path

    def hub_ops(f, max_dim=1920):
        # HUB ops for 1 image 'f'
        im = Image.open(f)
        r = max_dim / max(im.height, im.width)  # ratio
        if r < 1.0:  # image too large
            im = im.resize((int(im.width * r), int(im.height * r)))
        im.save(im_dir / Path(f).name, quality=75)  # save

    zipped, data_dir, yaml_path = unzip(Path(path))
    with open(check_yaml(yaml_path), errors='ignore') as f:
        data = yaml.safe_load(f)  # data dict
        if zipped:
            data['path'] = data_dir  # TODO: should this be dir.resolve()?
    check_dataset(data, autodownload)  # download dataset if missing
    hub_dir = Path(data['path'] + ('-hub' if hub else ''))
    stats = {'nc': data['nc'], 'names': data['names']}  # statistics dictionary
    for split in 'train', 'val', 'test':
        if data.get(split) is None:
            stats[split] = None  # i.e. no test set
            continue
        x = []
        dataset = LoadImagesAndLabels(data[split])  # load dataset
        for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
            x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
        x = np.array(x)  # shape(128x80)
        stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
                        'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
                                        'per_class': (x > 0).sum(0).tolist()},
                        'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
                                   zip(dataset.img_files, dataset.labels)]}

        if hub:
            im_dir = hub_dir / 'images'
            im_dir.mkdir(parents=True, exist_ok=True)
            for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
                pass

    # Profile
    stats_path = hub_dir / 'stats.json'
    if profile:
        for _ in range(1):
            file = stats_path.with_suffix('.npy')
            t1 = time.time()
            np.save(file, stats)
            t2 = time.time()
            x = np.load(file, allow_pickle=True)
            print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

            file = stats_path.with_suffix('.json')
            t1 = time.time()
            with open(file, 'w') as f:
                json.dump(stats, f)  # save stats *.json
            t2 = time.time()
            with open(file, 'r') as f:
                x = json.load(f)  # load hyps dict
            print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

    # Save, print and return
    if hub:
        print(f'Saving {stats_path.resolve()}...')
        with open(stats_path, 'w') as f:
            json.dump(stats, f)  # save stats.json

    if verbose:
        print(json.dumps(stats, indent=2, sort_keys=False))
    return stats