# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 model on a custom dataset

Usage:
    $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import logging
import math
import os
import random
import sys
import time
from copy import deepcopy
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import Adam, SGD, lr_scheduler
from tqdm import tqdm

FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[0].as_posix())  # add yolov5/ to path

import val  # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr, methods
from utils.downloads import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_labels, plot_evolve
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
from utils.loggers.wandb.wandb_utils import check_wandb_resume
from utils.metrics import fitness
from utils.loggers import Loggers
from utils.callbacks import Callbacks
LOGGER = logging.getLogger(__name__)
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))

def train(hyp,  # path/to/hyp.yaml or hyp dictionary
          opt,
          device,
          callbacks=Callbacks()
          ):
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze

    # Directories
    w = save_dir / 'weights'  # weights dir
    w.mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp) as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)
    data_dict = None

    # Loggers
    if RANK in [-1, 0]:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
        if loggers.wandb:
            data_dict = loggers.wandb.data_dict
            if resume:
                weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp

        # Register actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))

    # Config
    plots = not evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + RANK)
    with torch_distributed_zero_first(RANK):
        data_dict = data_dict or check_dataset(data)  # check if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
    is_coco = data.endswith('coco.yaml') and nc == 80  # COCO dataset

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create

    # Freeze
    freeze = [f'model.{x}.' for x in range(freeze)]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print(f'freezing {k}')
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")
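    # Note: with the nominal batch size nbs=64, gradients are accumulated over
    # `accumulate` batches before each optimizer step, so e.g. --batch-size 16
    # gives accumulate=4 and an effective batch of 64. Weight decay is rescaled
    # above so its per-step effect matches the nominal batch size.
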
    g0, g1, g2 = [], [], []  # optimizer parameter groups
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
            g2.append(v.bias)
        if isinstance(v, nn.BatchNorm2d):  # weight (no decay)
            g0.append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            g1.append(v.weight)

    if opt.adam:
        optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']})  # add g1 with weight_decay
    optimizer.add_param_group({'params': g2})  # add g2 (biases)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
                f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias")
    del g0, g1, g2

    # Scheduler
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
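    # one_cycle() (utils.general) returns a cosine ramp of the form
    # lambda x: ((1 - cos(x * pi / steps)) / 2) * (y2 - y1) + y1, so the LR
    # factor decays smoothly from 1 at epoch 0 to hyp['lrf'] at the last epoch.
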
    # EMA
    ema = ModelEMA(model) if RANK in [-1, 0] else None
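    # ModelEMA keeps an exponential moving average of the weights, roughly
    # ema = d * ema + (1 - d) * model with the decay d ramping up as updates
    # accumulate; the EMA weights are what get validated and saved under 'ema'.
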
    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if resume:
            assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
        if epochs < start_epoch:
            LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, csd

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n'
                        'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
                                              hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=RANK,
                                              workers=workers, image_weights=opt.image_weights, quad=opt.quad,
                                              prefix=colorstr('train: '))
    mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max())  # max label class
    nb = len(train_loader)  # number of batches
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in [-1, 0]:
        val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
                                       hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1,
                                       workers=workers, pad=0.5,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            labels = np.concatenate(dataset.labels, 0)
            # c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

        callbacks.on_pretrain_routine_end()

    # DDP mode
    if cuda and RANK != -1:
        model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
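    # The default box/cls/obj gains assume 3 detection layers, 80 classes and
    # 640-pixel images; the rescaling above keeps the loss balance comparable
    # for other values of nl, nc and imgsz.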
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if RANK in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if RANK != -1:
                indices = (torch.tensor(dataset.indices) if RANK == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if RANK != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
        if RANK in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
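            # Warmup example (assuming the default hyp.scratch.yaml values): with
            # nw=1000 and ni=500, np.interp puts each group's lr halfway along its
            # ramp -- the bias group falls from warmup_bias_lr=0.1 toward
            # lr0 * lf(epoch), the weight groups rise from 0.0, and momentum
            # ramps linearly from 0.8 to 0.937.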
            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size (randrange needs ints)
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.
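                # Note on the WORLD_SIZE multiply above: DDP averages gradients
                # across processes, so scaling the loss by WORLD_SIZE restores a
                # gradient equivalent to summing over the global batch.
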
            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni - last_opt_step >= accumulate:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (
                    f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots)
            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in [-1, 0]:
            # mAP
            callbacks.on_train_epoch_end(epoch=epoch)
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not noval or final_epoch:  # Calculate mAP
                results, maps, _ = val.run(data_dict,
                                           batch_size=batch_size // WORLD_SIZE * 2,
                                           imgsz=imgsz,
                                           model=ema.ema,
                                           single_cls=single_cls,
                                           dataloader=val_loader,
                                           save_dir=save_dir,
                                           save_json=is_coco and final_epoch,
                                           verbose=nc < 50 and final_epoch,
                                           plots=plots and final_epoch,
                                           callbacks=callbacks,
                                           compute_loss=compute_loss)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
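            # fitness() (utils.metrics) collapses [P, R, mAP@.5, mAP@.5:.95]
            # into one scalar, weighted [0.0, 0.0, 0.1, 0.9], so checkpoint
            # selection is driven almost entirely by mAP@.5:.95.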
            if fi > best_fitness:
                best_fitness = fi
            log_vals = list(mloss) + list(results) + lr
            callbacks.on_fit_epoch_end(log_vals, epoch, best_fitness, fi)

            # Save model
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {'epoch': epoch,
                        'best_fitness': best_fitness,
                        'model': deepcopy(de_parallel(model)).half(),
                        'ema': deepcopy(ema.ema).half(),
                        'updates': ema.updates,
                        'optimizer': optimizer.state_dict(),
                        'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
                callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi)

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in [-1, 0]:
        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
        if not evolve:
            if is_coco:  # COCO dataset
                for m in [last, best] if best.exists() else [last]:  # speed, mAP tests
                    results, _, _ = val.run(data_dict,
                                            batch_size=batch_size // WORLD_SIZE * 2,
                                            imgsz=imgsz,
                                            model=attempt_load(m, device).half(),
                                            iou_thres=0.7,  # NMS IoU threshold for best pycocotools results
                                            single_cls=single_cls,
                                            dataloader=val_loader,
                                            save_dir=save_dir,
                                            save_json=True,
                                            plots=False)
            # Strip optimizers
            for f in last, best:
                if f.exists():
                    strip_optimizer(f)  # strip optimizers
        callbacks.on_train_end(last, best, plots, epoch)
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")

    torch.cuda.empty_cache()
    return results

def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--entity', default=None, help='W&B entity')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--linear-lr', action='store_true', help='linear LR')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
    parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epochs')
    parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--freeze', type=int, default=0, help='number of layers to freeze: backbone=10, all=24')
    opt = parser.parse_known_args()[0] if known else parser.parse_args()
    return opt

def main(opt):
    # Checks
    set_logging(RANK)
    if RANK in [-1, 0]:
        print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
        check_git_status()
        check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop'])

    # Resume
    if opt.resume and not check_wandb_resume(opt) and not opt.evolve:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.safe_load(f))  # replace
        opt.cfg, opt.weights, opt.resume = '', ckpt, True  # reinstate
        LOGGER.info(f'Resuming training from {ckpt}')
    else:
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            opt.project = 'runs/evolve'
            opt.exist_ok = opt.resume
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        from datetime import timedelta
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count'
        assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'
        assert not opt.evolve, '--evolve argument is not compatible with DDP training'
        assert not opt.sync_bn, '--sync-bn known training issue, see https://github.com/ultralytics/yolov5/issues/3998'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
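        # A typical DDP launch (see the Multi-GPU tutorial linked above) would be:
        #   $ python -m torch.distributed.run --nproc_per_node 2 train.py --batch-size 64 --device 0,1
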
    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device)
        if WORLD_SIZE > 1 and RANK == 0:
            _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')]

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
                'box': (1, 0.02, 0.2),  # box loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
                'mixup': (1, 0.0, 1.0),  # image mixup (probability)
                'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
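        # Each meta value is (mutation gain, lower limit, upper limit): a gain of
        # 0 freezes that hyperparameter, larger gains mutate it more aggressively,
        # and the limits clamp the mutated value further below.
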
        with open(opt.hyp) as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {save_dir}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate
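                # Rows of x come from evolve.csv, whose first 7 columns are the
                # result metrics written by print_mutation; hyperparameter values
                # start at column 7, hence the x[i + 7] offset.
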
            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device)

            # Write mutation results
            print_mutation(results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        print(f'Hyperparameter evolution finished\n'
              f"Results saved to {colorstr('bold', save_dir)}\n"
              f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}')


def run(**kwargs):
    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)