import argparse
import logging
import math
import os
import random
import shutil
import time
from pathlib import Path
from warnings import warn

import numpy as np
import torch  # explicit import (previously only bound via the torch.utils.data import below)
import torch.distributed as dist
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

import test  # import test.py to get mAP after each epoch
from models.yolo import Model
from utils.datasets import create_dataloader
from utils.general import (
    torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors, labels_to_image_weights,
    compute_loss, plot_images, fitness, strip_optimizer, plot_results, get_latest_run, check_dataset, check_file,
    check_git_status, check_img_size, increment_dir, print_mutation, plot_evolution, set_logging, init_seeds)
from utils.google_utils import attempt_download
from utils.torch_utils import ModelEMA, select_device, intersect_dicts

logger = logging.getLogger(__name__)


def train(hyp, opt, device, tb_writer=None):
    logger.info(f'Hyperparameters {hyp}')
    log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve'  # logging directory
    wdir = log_dir / 'weights'  # weights directory
    os.makedirs(wdir, exist_ok=True)
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = str(log_dir / 'results.txt')
    epochs, batch_size, total_batch_size, weights, rank = \
        opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Save run settings
    with open(log_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(log_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names'])  # number classes, names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
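    # The data yaml (coco128.yaml-style) is expected to look roughly like:
    #   train: ../coco128/images/train2017  # training images
    #   val: ../coco128/images/train2017    # validation images
    #   nc: 80                              # number of classes
    #   names: ['person', 'bicycle', ...]   # class names, len(names) == nc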

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        if hyp.get('anchors'):
            ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create
        exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create

    # Freeze
    freeze = ['', ]  # parameter names to freeze (full or partial)
    if any(freeze):
        for k, v in model.named_parameters():
            if any(x in k for x in freeze):
                print('freezing %s' % k)
                v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
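    # Worked example: with the default --batch-size 16, accumulate = max(round(64 / 16), 1) = 4,
    # so gradients are summed over 4 batches before each optimizer step (effective batch ~64)
    # and weight_decay is scaled by 16 * 4 / 64 = 1.0 (unchanged); at total_batch_size = 128,
    # accumulate = 1 and weight_decay is doubled to suit the larger per-step batch.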

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_parameters():
        v.requires_grad = True
        if '.bias' in k:
            pg2.append(v)  # biases
        elif '.weight' in k and '.bn' not in k:
            pg1.append(v)  # apply weight decay
        else:
            pg0.append(v)  # all else

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)
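    # lf maps epoch x to a multiplier on each group's initial lr: lf(0) = 1 (lr = lr0) and
    # lf(epochs) = hyp['lrf'] (lr = lr0 * lrf), with one smooth half-cosine in between; e.g.
    # lr0 = 0.01, lrf = 0.2 decays the lr from 0.01 to 0.002 over the full schedule.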

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # Results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
            shutil.copytree(wdir, wdir.parent / f'weights_backup_epoch{start_epoch - 1}')  # save previous weights
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # Exponential moving average
    ema = ModelEMA(model) if rank in [-1, 0] else None
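    # ModelEMA maintains a shadow copy updated roughly as ema = d * ema + (1 - d) * model after
    # each optimizer step, with d ramping from 0 toward ~0.9999 over the first few thousand
    # updates, so the averaged weights that are tested and saved downweight early, noisy steps.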

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect,
                                            rank=rank, world_size=opt.world_size, workers=opt.workers)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        ema.updates = start_epoch * nb // accumulate  # set EMA updates
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,
                                       hyp=hyp, augment=False, cache=opt.cache_images and not opt.notest, rect=True,
                                       rank=-1, world_size=opt.world_size, workers=opt.workers)[0]  # testloader

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            plot_labels(labels, save_dir=log_dir)
            if tb_writer:
                # tb_writer.add_hparams(hyp, {})  # causes duplicate https://github.com/ultralytics/yolov5/pull/384
                tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1e3)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
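    # E.g. warmup_epochs = 3 on coco128 at batch size 16 (nb = 8 batches/epoch) gives
    # round(24) = 24 warmup iterations, so the 1e3 floor dominates and warmup lasts 1000 steps.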
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    logger.info('Image sizes %g train, %g test\n'
                'Using %g dataloader workers\nLogging results to %s\n'
                'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, log_dir, epochs))
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
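            # np.interp is linear, so e.g. halfway through warmup (ni = nw/2) with warmup_bias_lr
            # = 0.1 the bias group (j == 2) is halfway down from 0.1 toward lr0 * lf(epoch), the
            # other groups are halfway up from 0.0, and momentum sits midway between
            # warmup_momentum and hyp['momentum'].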

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size (int args: randrange rejects floats on newer Pythons)
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
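            # E.g. imgsz = 640, gs = 32: sz is drawn from {320, 352, ..., 960}, so every
            # randomly chosen training resolution stays a multiple of the model's max stride.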

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), model)  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
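            # scaler.step() unscales the gradients and silently skips the optimizer step if any
            # of them overflowed to inf/NaN; scaler.update() then adapts the loss scale, and the
            # EMA snapshot is refreshed once per accumulation cycle, right after the step.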

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if ni < 3:
                    f = str(log_dir / ('train_batch%g.jpg' % ni))  # filename
                    result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    if tb_writer and result is not None:
                        tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                        # tb_writer.add_graph(model, imgs)  # add model to tensorboard

            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            if ema:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=log_dir,
                                                 plots=epoch == 0 or final_epoch)  # plot first and last

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Tensorboard
            if tb_writer:
                tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                        'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                        'x/lr0', 'x/lr1', 'x/lr2']  # params
                for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                    tb_writer.add_scalar(tag, x, epoch)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
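            # In this codebase fitness() reduces the four leading metrics to one scalar via a
            # weighted sum of [P, R, mAP@.5, mAP@.5:.95] (weights roughly [0.0, 0.0, 0.1, 0.9]),
            # so best.pt selection is dominated by mAP@.5:.95.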

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Strip optimizers
        n = opt.name if opt.name.isnumeric() else ''
        fresults, flast, fbest = log_dir / f'results{n}.txt', wdir / f'last{n}.pt', wdir / f'best{n}.pt'
        for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', results_file], [flast, fbest, fresults]):
            if os.path.exists(f1):
                os.rename(f1, f2)  # rename
                if str(f2).endswith('.pt'):  # is *.pt
                    strip_optimizer(f2)  # strip optimizer
                    os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None  # upload

        # Finish
        if not opt.evolve:
            plot_results(save_dir=log_dir)  # save as results.png
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))

    dist.destroy_process_group() if rank not in [-1, 0] else None
    torch.cuda.empty_cache()
    return results


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--name', default='', help='renames experiment folder exp{N} to exp{N}_{name} if supplied')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    opt = parser.parse_args()

    # Set DDP variables
    opt.total_batch_size = opt.batch_size
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    if opt.global_rank in [-1, 0]:
        check_git_status()

    # Resume
    if opt.resume:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        log_dir = Path(ckpt).parent.parent  # runs/exp0
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        with open(log_dir / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
        opt.cfg, opt.weights, opt.resume = '', ckpt, True
        logger.info('Resuming training from %s' % ckpt)
    else:
        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        log_dir = increment_dir(Path(opt.logdir) / 'exp', opt.name)  # runs/exp1

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        opt.batch_size = opt.total_batch_size // opt.world_size
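    # Typical multi-GPU launch for this script (torch.distributed.launch sets WORLD_SIZE/RANK
    # and passes --local_rank to each process), e.g. for 2 GPUs:
    #   python -m torch.distributed.launch --nproc_per_node 2 train.py --batch-size 32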

    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.load(f, Loader=yaml.FullLoader)  # load hyps
        if 'box' not in hyp:
            warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' %
                 (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120'))
            hyp['box'] = hyp.pop('giou')

    # Train
    logger.info(opt)
    if not opt.evolve:
        tb_writer = None
        if opt.global_rank in [-1, 0]:
            logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.logdir}", view at http://localhost:6006/')
            tb_writer = SummaryWriter(log_dir=log_dir)  # runs/exp0

        train(hyp, opt, device, tb_writer)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
                'box': (1, 0.02, 0.2),  # box loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)
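        # Each meta value is (mutation gain, lower limit, upper limit): the gain scales the
        # mutation step below (a gain of 0, e.g. 'iou_t' or 'fl_gamma', freezes that key),
        # and the limits are the clamp range applied after every mutation.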

        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path(opt.logdir) / 'evolve' / 'hyp_evolved.yaml'  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(300):  # generations to evolve
            if os.path.exists('evolve.txt'):  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate
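                # The + 7 offset skips the 7 result columns (P, R, mAP@.5, mAP@.5:.95 and the
                # three val losses) that precede the hyperparameter values in each evolve.txt row.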

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device)

            # Write mutation results
            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)

        # Plot results
        plot_evolution(yaml_file)
        print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
              f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')