import argparse
import logging
import math
import os
import random
import shutil
import time
from pathlib import Path

import numpy as np
import torch.distributed as dist
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

import test  # import test.py to get mAP after each epoch
from models.yolo import Model
from utils.datasets import create_dataloader
from utils.general import (
    torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors, labels_to_image_weights,
    compute_loss, plot_images, fitness, strip_optimizer, plot_results, get_latest_run, check_dataset, check_file,
    check_git_status, check_img_size, increment_dir, print_mutation, plot_evolution, set_logging)
from utils.google_utils import attempt_download
from utils.torch_utils import init_seeds, ModelEMA, select_device, intersect_dicts

logger = logging.getLogger(__name__)


def train(hyp, opt, device, tb_writer=None):
    logger.info(f'Hyperparameters {hyp}')
    log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve'  # logging directory
    wdir = log_dir / 'weights'  # weights directory
    os.makedirs(wdir, exist_ok=True)
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = str(log_dir / 'results.txt')
    epochs, batch_size, total_batch_size, weights, rank = \
        opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Save run settings
    with open(log_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(log_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names'])  # number classes, names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        # if hyp['anchors']:
        #     ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create
        exclude = ['anchor'] if opt.cfg else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create

    # Freeze
    freeze = ['', ]  # parameter names to freeze (full or partial)
    if any(freeze):
        for k, v in model.named_parameters():
            if any(x in k for x in freeze):
                print('freezing %s' % k)
                v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
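    # Gradient accumulation: gradients are summed over `accumulate` batches before each
    # optimizer step, so the effective batch size is total_batch_size * accumulate ~= nbs.
    # Weight decay is applied per step, not per batch, so it is rescaled by the same
    # factor to keep regularization consistent across --batch-size choices.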

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_parameters():
        v.requires_grad = True
        if '.bias' in k:
            pg2.append(v)  # biases
        elif '.weight' in k and '.bn' not in k:
            pg1.append(v)  # apply weight decay
        else:
            pg0.append(v)  # all else

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)
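    # lf(0) = 1.0 and lf(epochs) = hyp['lrf'], so each parameter group's lr decays along a
    # half-cosine from initial_lr down to initial_lr * lrf over the course of training.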

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # Results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
            shutil.copytree(wdir, wdir.parent / f'weights_backup_epoch{start_epoch - 1}')  # save previous weights
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # Exponential moving average
    ema = ModelEMA(model) if rank in [-1, 0] else None
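    # The EMA keeps an exponentially-weighted copy of the weights, updated after every
    # optimizer step; this smoothed copy (ema.ema) is what gets evaluated for mAP and
    # saved in checkpoints below, not the raw training weights.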

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Testloader
    if rank in [-1, 0]:
        ema.updates = start_epoch * nb // accumulate  # set EMA updates
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,
                                       hyp=hyp, augment=False, cache=opt.cache_images, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers)[0]  # only runs on process 0

    # Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
    model.names = names

    # Class frequency
    if rank in [-1, 0]:
        labels = np.concatenate(dataset.labels, 0)
        c = torch.tensor(labels[:, 0])  # classes
        # cf = torch.bincount(c.long(), minlength=nc) + 1.
        # model._initialize_biases(cf.to(device))
        plot_labels(labels, save_dir=log_dir)
        if tb_writer:
            # tb_writer.add_hparams(hyp, {})  # causes duplicate https://github.com/ultralytics/yolov5/pull/384
            tb_writer.add_histogram('classes', c, 0)

        # Check anchors
        if not opt.noautoanchor:
            check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Start training
    t0 = time.time()
    nw = max(3 * nb, 1e3)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
    scheduler.last_epoch = start_epoch - 1  # do not move
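    # LambdaLR derives the lr from its internal epoch counter, so on resume last_epoch is
    # set to start_epoch - 1 to continue the cosine curve instead of restarting it.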
    scaler = amp.GradScaler(enabled=cuda)
    logger.info('Image sizes %g train, %g test' % (imgsz, imgsz_test))
    logger.info('Using %g dataloader workers' % dataloader.num_workers)
    logger.info('Starting training for %g epochs...' % epochs)
    # torch.autograd.set_detect_anomaly(True)
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
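            # During the first nw iterations, lr and momentum are interpolated per batch
            # (np.interp) and `accumulate` ramps from 1 to nbs / total_batch_size, giving
            # small, frequent updates before the per-epoch cosine schedule takes over.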

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
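            # Each batch is rescaled to a random size in [0.5 * imgsz, 1.5 * imgsz], snapped
            # to a multiple of the grid size gs; labels are normalized, so they need no update.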

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), model)  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
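            # Mixed precision: autocast runs the forward pass in FP16 where safe, while
            # GradScaler scales the loss to avoid FP16 gradient underflow; scaler.step()
            # skips the update if inf/nan gradients are found and scaler.update() retunes
            # the scale factor. The optimizer steps only every `accumulate` batches.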

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if ni < 3:
                    f = str(log_dir / ('train_batch%g.jpg' % ni))  # filename
                    result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    if tb_writer and result is not None:
                        tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                        # tb_writer.add_graph(model, imgs)  # add model to tensorboard

            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            if ema:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=log_dir)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Tensorboard
            if tb_writer:
                tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                        'val/giou_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                        'x/lr0', 'x/lr1', 'x/lr2']  # params
                for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                    tb_writer.add_scalar(tag, x, epoch)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
            if fi > best_fitness:
                best_fitness = fi

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Strip optimizers
        n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
        fresults, flast, fbest = 'results%s.txt' % n, wdir / f'last{n}.pt', wdir / f'best{n}.pt'
        for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', 'results.txt'], [flast, fbest, fresults]):
            if os.path.exists(f1):
                os.rename(f1, f2)  # rename
                if str(f2).endswith('.pt'):  # is *.pt
                    strip_optimizer(f2)  # strip optimizer
                    os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None  # upload

        # Finish
        if not opt.evolve:
            plot_results(save_dir=log_dir)  # save as results.png
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))

    dist.destroy_process_group() if rank not in [-1, 0] else None
    torch.cuda.empty_cache()
    return results


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='', help='hyperparameters path, i.e. data/hyp.scratch.yaml')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    opt = parser.parse_args()

    # Set DDP variables
    opt.total_batch_size = opt.batch_size
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    if opt.global_rank in [-1, 0]:
        check_git_status()

    # Resume
    if opt.resume:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        log_dir = Path(ckpt).parent.parent  # runs/exp0
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        with open(log_dir / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
        opt.cfg, opt.weights, opt.resume = '', ckpt, True
        logger.info('Resuming training from %s' % ckpt)
    else:
        opt.hyp = opt.hyp or ('data/hyp.finetune.yaml' if opt.weights else 'data/hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        log_dir = increment_dir(Path(opt.logdir) / 'exp', opt.name)  # runs/exp1

    device = select_device(opt.device, batch_size=opt.batch_size)

    # DDP mode
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        opt.batch_size = opt.total_batch_size // opt.world_size

    logger.info(opt)
    with open(opt.hyp) as f:
        hyp = yaml.load(f, Loader=yaml.FullLoader)  # load hyps

    # Train
    if not opt.evolve:
        tb_writer = None
        if opt.global_rank in [-1, 0]:
            logger.info('Start Tensorboard with "tensorboard --logdir %s", view at http://localhost:6006/' % opt.logdir)
            tb_writer = SummaryWriter(log_dir=log_dir)  # runs/exp0

        train(hyp, opt, device, tb_writer)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.1, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'giou': (1, 0.02, 0.2),  # GIoU loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                # 'anchors': (1, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)

        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path('runs/evolve/hyp_evolved.yaml')  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(100):  # generations to evolve
            if os.path.exists('evolve.txt'):  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.9, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate
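                # Each evolve.txt row holds the 7 result columns (see the results tuple in
                # train()) followed by the hyperparameter values, hence the x[i + 7] offset.
                # The per-key gain g from meta scales mutation strength; a gain of 0 freezes
                # that hyperparameter entirely.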

                # Constrain to limits
                for k, v in meta.items():
                    hyp[k] = max(hyp[k], v[1])  # lower limit
                    hyp[k] = min(hyp[k], v[2])  # upper limit
                    hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device)

            # Write mutation results
            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)

        # Plot results
        plot_evolution(yaml_file)
        print('Hyperparameter evolution complete. Best results saved as: %s\nCommand to train a new model with these '
              'hyperparameters: $ python train.py --hyp %s' % (yaml_file, yaml_file))