import argparse

import torch.distributed as dist
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter

import test  # import test.py to get mAP after each epoch
from models.yolo import Model
from utils import google_utils
from utils.datasets import *
from utils.utils import *  # the wildcard imports also supply os, glob, math, random, time, np, yaml, tqdm, Path and torch_utils used below

mixed_precision = True
try:  # Mixed precision training https://github.com/NVIDIA/apex
    from apex import amp
except ImportError:
    print('Apex recommended for faster mixed precision training: https://github.com/NVIDIA/apex')
    mixed_precision = False  # not installed
# Hyperparameters
hyp = {'optimizer': 'SGD',  # ['adam', 'SGD', None] if none, default is SGD
       'lr0': 0.01,  # initial learning rate (SGD=1E-2, Adam=1E-3)
       'momentum': 0.937,  # SGD momentum/Adam beta1
       'weight_decay': 5e-4,  # optimizer weight decay
       'giou': 0.05,  # GIoU loss gain
       'cls': 0.5,  # cls loss gain
       'cls_pw': 1.0,  # cls BCELoss positive_weight
       'obj': 1.0,  # obj loss gain (*=img_size/320 if img_size != 320)
       'obj_pw': 1.0,  # obj BCELoss positive_weight
       'iou_t': 0.20,  # IoU training threshold
       'anchor_t': 4.0,  # anchor-multiple threshold
       'fl_gamma': 0.0,  # focal loss gamma (EfficientDet default is gamma=1.5)
       'hsv_h': 0.015,  # image HSV-Hue augmentation (fraction)
       'hsv_s': 0.7,  # image HSV-Saturation augmentation (fraction)
       'hsv_v': 0.4,  # image HSV-Value augmentation (fraction)
       'degrees': 0.0,  # image rotation (+/- deg)
       'translate': 0.0,  # image translation (+/- fraction)
       'scale': 0.5,  # image scale (+/- gain)
       'shear': 0.0}  # image shear (+/- deg)
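# Any key above can be overridden at runtime via --hyp path/to/hyp.yaml, which is merged into this
# dict with hyp.update() in the __main__ section below.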


def train(hyp, tb_writer, opt, device):
    print(f'Hyperparameters {hyp}')
    log_dir = tb_writer.log_dir if tb_writer else 'runs/evolution'  # run directory
    wdir = str(Path(log_dir) / 'weights') + os.sep  # weights directory
    os.makedirs(wdir, exist_ok=True)
    last = wdir + 'last.pt'
    best = wdir + 'best.pt'
    results_file = log_dir + os.sep + 'results.txt'
    epochs, batch_size, total_batch_size, weights, rank = \
        opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.local_rank
    # TODO: Init DDP logging. Only the first process is allowed to log.
    # Since this function prints frequently, logging configuration is skipped here, so repeated outputs may appear.

    # Save run settings
    with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(Path(log_dir) / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names'])  # number classes, names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Remove previous results
    if rank in [-1, 0]:
        for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
            os.remove(f)

    # Create model
    model = Model(opt.cfg, nc=nc).to(device)

    # Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # Optimizer
    nbs = 64  # nominal batch size
    # The default DDP implementation is slow for gradient accumulation: https://pytorch.org/docs/stable/notes/ddp.html
    # The all-reduce operation runs during loss.backward(), so there are redundant all-reduce communications
    # within an accumulation cycle; the result is still correct but training gets slower.
    # TODO: If acceleration is needed, there is an implementation of allreduce_post_accumulation
    # in https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
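    # e.g. with the default --batch-size 16: accumulate = max(round(64 / 16), 1) = 4, so gradients from
    # 4 batches are summed before each optimizer.step(), for an effective batch size of 64. weight_decay
    # is rescaled by 16 * 4 / 64 = 1.0 here, i.e. it only changes when the effective batch differs from nbs.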

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_parameters():
        if v.requires_grad:
            if '.bias' in k:
                pg2.append(v)  # biases
            elif '.weight' in k and '.bn' not in k:
                pg1.append(v)  # apply weight decay
            else:
                pg0.append(v)  # all else

    if hyp['optimizer'] == 'adam':  # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
    # plot_lr_scheduler(optimizer, scheduler, epochs)
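    # The lambda above decays the lr multiplier smoothly from 1.0 at epoch 0 to 0.2 at the final epoch:
    # lf(x) = 0.2 + 0.8 * (1 + cos(pi * x / epochs)) / 2, so the learning rate ends at 0.2 * lr0.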

    # Load Model
    with torch_distributed_zero_first(rank):
        google_utils.attempt_download(weights)
    start_epoch, best_fitness = 0, 0.0
    if weights.endswith('.pt'):  # pytorch format
        ckpt = torch.load(weights, map_location=device)  # load checkpoint

        # load model
        try:
            exclude = ['anchor']  # exclude keys
            ckpt['model'] = {k: v for k, v in ckpt['model'].float().state_dict().items()
                             if k in model.state_dict() and not any(x in k for x in exclude)
                             and model.state_dict()[k].shape == v.shape}
            model.load_state_dict(ckpt['model'], strict=False)
            print('Transferred %g/%g items from %s' % (len(ckpt['model']), len(model.state_dict()), weights))
        except KeyError as e:
            s = "%s is not compatible with %s. This may be due to model differences or %s may be out of date. " \
                "Please delete or update %s and try again, or use --weights '' to train from scratch." \
                % (weights, opt.cfg, weights, weights)
            raise KeyError(s) from e

        # load optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # load results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # epochs
        start_epoch = ckpt['epoch'] + 1
        if epochs < start_epoch:
            print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                  (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    # DP mode
    if device.type != 'cpu' and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and device.type != 'cpu' and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        print('Using SyncBatchNorm()')

    # Exponential moving average
    ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None

    # DDP mode
    if device.type != 'cpu' and rank != -1:
        model = DDP(model, device_ids=[rank], output_device=rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True,
                                            cache=opt.cache_images, rect=opt.rect, local_rank=rank,
                                            world_size=opt.world_size)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Testloader
    if rank in [-1, 0]:
        # local_rank is set to -1 because only the first process is expected to do evaluation
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, hyp=hyp, augment=False,
                                       cache=opt.cache_images, rect=True, local_rank=-1, world_size=opt.world_size)[0]

    # Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
    model.names = names

    # Class frequency
    if rank in [-1, 0]:
        labels = np.concatenate(dataset.labels, 0)
        c = torch.tensor(labels[:, 0])  # classes
        # cf = torch.bincount(c.long(), minlength=nc) + 1.
        # model._initialize_biases(cf.to(device))
        plot_labels(labels, save_dir=log_dir)
        if tb_writer:
            # tb_writer.add_hparams(hyp, {})  # causes duplicate https://github.com/ultralytics/yolov5/pull/384
            tb_writer.add_histogram('classes', c, 0)

    # Check anchors
    if not opt.noautoanchor:
        check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Start training
    t0 = time.time()
    nw = max(3 * nb, 1e3)  # number of warmup iterations, max(3 epochs, 1k iterations)
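    # e.g. the default coco128 dataset (128 images) at batch size 16 gives nb = 8, so 3 * nb = 24 < 1000
    # and warmup runs for the first 1000 integrated batches; on larger datasets it lasts ~3 epochs.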
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
    scheduler.last_epoch = start_epoch - 1  # do not move
    if rank in [-1, 0]:
        print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
        print('Using %g dataloader workers' % dataloader.num_workers)
        print('Starting training for %g epochs...' % epochs)
    # torch.autograd.set_detect_anomaly(True)
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        # When in DDP mode, the generated indices are broadcast to keep the dataset synchronized across processes.
        if dataset.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                dataset.indices = random.choices(range(dataset.n), weights=image_weights,
                                                 k=dataset.n)  # rand weighted idx
            # Broadcast
            if rank != -1:
                indices = torch.zeros([dataset.n], dtype=torch.int)
                if rank == 0:
                    indices[:] = torch.tensor(dataset.indices, dtype=torch.int)
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()
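                # Rank 0 draws the weighted sample; broadcasting it keeps dataset.indices identical
                # across processes, so all DDP workers agree on which images make up the epoch.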

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        if rank in [-1, 0]:
            print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
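                # Over the same warmup window, accumulate ramps linearly from 1 toward nbs / total_batch_size
                # and momentum ramps from 0.9 to hyp['momentum'] (0.937 by default), stabilizing early updates.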

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
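                # e.g. with imgsz=640 and gs=32, sz is drawn from [320, 960] in multiples of 32, so each
                # batch is resized to a randomly chosen resolution in that range (+/- 50% of --img-size).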

            # Forward
            pred = model(imgs)

            # Loss
            loss, loss_items = compute_loss(pred, targets.to(device), model)  # scaled by batch_size
            if rank != -1:
                loss *= opt.world_size  # gradient averaged between devices in DDP mode
            if not torch.isfinite(loss):
                print('WARNING: non-finite loss, ending training ', loss_items)
                return results

            # Backward
            if mixed_precision:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # Optimize
            if ni % accumulate == 0:
                optimizer.step()
                optimizer.zero_grad()
                if ema is not None:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if ni < 3:
                    f = str(Path(log_dir) / ('train_batch%g.jpg' % ni))  # filename
                    result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    if tb_writer and result is not None:
                        tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                        # tb_writer.add_graph(model, imgs)  # add model to tensorboard

            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        scheduler.step()

        # Only the first process in DDP mode is allowed to log or save checkpoints.
        if rank in [-1, 0]:
            # mAP
            if ema is not None:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
                                                 model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=log_dir)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Tensorboard
            if tb_writer:
                tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                        'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                    tb_writer.add_scalar(tag, x, epoch)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
            if fi > best_fitness:
                best_fitness = fi
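            # fitness() (utils.utils) reduces the results row to one scalar via a fixed weighting of
            # [P, R, mAP, F1]; the weights are defined there and heavily favor mAP.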

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Strip optimizers
        n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
        fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
        for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
            if os.path.exists(f1):
                os.rename(f1, f2)  # rename
                if f2.endswith('.pt'):  # is *.pt
                    strip_optimizer(f2)  # strip optimizer
                    if opt.bucket:
                        os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket))  # upload

        # Finish
        if not opt.evolve:
            plot_results(save_dir=log_dir)  # save as results.png
        print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))

    if rank not in [-1, 0]:
        dist.destroy_process_group()
    torch.cuda.empty_cache()
    return results


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='models/yolov5s.yaml', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='', help='hyp.yaml path (optional)')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const='get_last', default=False,
                        help='resume from given path/to/last.pt, or most recent run if blank')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--weights', type=str, default='', help='initial weights path')
    parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    opt = parser.parse_args()

    last = get_latest_run() if opt.resume == 'get_last' else opt.resume  # resume from most recent run
    if last and not opt.weights:
        print(f'Resuming training from {last}')
    opt.weights = last if opt.resume and not opt.weights else opt.weights
    if opt.local_rank in [-1, 0]:
        check_git_status()
    opt.cfg = check_file(opt.cfg)  # check file
    opt.data = check_file(opt.data)  # check file
    if opt.hyp:  # update hyps
        opt.hyp = check_file(opt.hyp)  # check file
        with open(opt.hyp) as f:
            hyp.update(yaml.load(f, Loader=yaml.FullLoader))  # update hyps
    opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
    device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
    opt.total_batch_size = opt.batch_size
    opt.world_size = 1
    if device.type == 'cpu':
        mixed_precision = False
    elif opt.local_rank != -1:
        # DDP mode
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        opt.world_size = dist.get_world_size()
        assert opt.batch_size % opt.world_size == 0, 'Batch size is not a multiple of the number of devices given!'
        opt.batch_size = opt.total_batch_size // opt.world_size
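        # e.g. launched via `python -m torch.distributed.launch --nproc_per_node 2 train.py --batch-size 64`,
        # world_size = 2, total_batch_size stays 64 and each process trains with a per-GPU batch of 32.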
    print(opt)

    # Train
    if not opt.evolve:
        if opt.local_rank in [-1, 0]:
            print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
            tb_writer = SummaryWriter(log_dir=increment_dir('runs/exp', opt.name))
        else:
            tb_writer = None
        train(hyp, tb_writer, opt, device)

    # Evolve hyperparameters (optional)
    else:
        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        tb_writer = None
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(10):  # generations to evolve
            if os.path.exists('evolve.txt'):  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.9, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([1, 1, 1, 1, 1, 1, 1, 0, .1, 1, 0, 1, 1, 1, 1, 1, 1, 1])  # gains
                ng = len(g)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                keys = [k for k in hyp.keys() if k != 'optimizer']  # 18 numeric hyps; 'optimizer' is a string
                for i, k in enumerate(keys):  # plt.hist(v.ravel(), 300)
                    hyp[k] = x[i + 7] * v[i]  # mutate
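                # Offset note: each evolve.txt row is assumed to hold the 7 result values (P, R, mAP, F1
                # and the 3 val losses) followed by the hyperparameter values, hence x[i + 7]; the row
                # layout is produced by print_mutation in utils.utils.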

                # Clip to limits
                keys = ['lr0', 'iou_t', 'momentum', 'weight_decay', 'hsv_s', 'hsv_v', 'translate', 'scale', 'fl_gamma']
                limits = [(1e-5, 1e-2), (0.00, 0.70), (0.60, 0.98), (0, 0.001), (0, .9), (0, .9), (0, .9), (0, .9), (0, 3)]
                for k, v in zip(keys, limits):
                    hyp[k] = np.clip(hyp[k], v[0], v[1])

            # Train mutation
            results = train(hyp.copy(), tb_writer, opt, device)

            # Write mutation results
            print_mutation(hyp, results, opt.bucket)

            # Plot results
            # plot_evolution_results(hyp)