- import argparse
- import logging
- import math
- import os
- import random
- import shutil
- import time
- from pathlib import Path
-
- import numpy as np
- import torch
- import torch.distributed as dist
- import torch.nn.functional as F
- import torch.optim as optim
- import torch.optim.lr_scheduler as lr_scheduler
- import torch.utils.data
- import yaml
- from torch.cuda import amp
- from torch.nn.parallel import DistributedDataParallel as DDP
- from torch.utils.tensorboard import SummaryWriter
- from tqdm import tqdm
-
- import test # import test.py to get mAP after each epoch
- from models.yolo import Model
- from utils.datasets import create_dataloader
- from utils.general import (
- torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors, labels_to_image_weights,
- compute_loss, plot_images, fitness, strip_optimizer, plot_results, get_latest_run, check_dataset, check_file,
- check_git_status, check_img_size, increment_dir, print_mutation, plot_evolution, set_logging)
- from utils.google_utils import attempt_download
- from utils.torch_utils import init_seeds, ModelEMA, select_device, intersect_dicts
-
- logger = logging.getLogger(__name__)
-
-
- def train(hyp, opt, device, tb_writer=None):
- logger.info(f'Hyperparameters {hyp}')
- log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve' # logging directory
- wdir = log_dir / 'weights' # weights directory
- os.makedirs(wdir, exist_ok=True)
- last = wdir / 'last.pt'
- best = wdir / 'best.pt'
- results_file = str(log_dir / 'results.txt')
- epochs, batch_size, total_batch_size, weights, rank = \
- opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
-
- # Save run settings
- with open(log_dir / 'hyp.yaml', 'w') as f:
- yaml.dump(hyp, f, sort_keys=False)
- with open(log_dir / 'opt.yaml', 'w') as f:
- yaml.dump(vars(opt), f, sort_keys=False)
-
- # Configure
- cuda = device.type != 'cpu'
- init_seeds(2 + rank)
- with open(opt.data) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
- with torch_distributed_zero_first(rank):
- check_dataset(data_dict) # check
- train_path = data_dict['train']
- test_path = data_dict['val']
- nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names
- assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
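- # data_dict comes from the dataset yaml: train/val image paths plus nc (class count) and names;
- # --single-cls collapses every label into a single 'item' class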
-
- # Model
- pretrained = weights.endswith('.pt')
- if pretrained:
- with torch_distributed_zero_first(rank):
- attempt_download(weights) # download if not found locally
- ckpt = torch.load(weights, map_location=device) # load checkpoint
- # if hyp['anchors']:
- # ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor
- model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create
- exclude = ['anchor'] if opt.cfg else [] # exclude keys
- state_dict = ckpt['model'].float().state_dict() # to FP32
- state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
- model.load_state_dict(state_dict, strict=False) # load
- logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
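- # intersect_dicts keeps only keys present in both state dicts with matching tensor shapes, so a
- # checkpoint trained with a different class count still transfers every compatible (backbone) weight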
- else:
- model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
-
- # Freeze
- freeze = ['', ] # parameter names to freeze (full or partial)
- if any(freeze):
- for k, v in model.named_parameters():
- if any(x in k for x in freeze):
- print('freezing %s' % k)
- v.requires_grad = False
-
- # Optimizer
- nbs = 64 # nominal batch size
- accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
- hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
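- # e.g. nbs=64 with total_batch_size=16 gives accumulate=4: gradients from 4 batches are summed before
- # each optimizer step, and weight_decay is rescaled so the decay per step matches the nominal batch size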
-
- pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
- for k, v in model.named_parameters():  # requires_grad left as-is so parameters frozen above stay frozen
- if '.bias' in k:
- pg2.append(v) # biases
- elif '.weight' in k and '.bn' not in k:
- pg1.append(v) # apply weight decay
- else:
- pg0.append(v) # all else
-
- if opt.adam:
- optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
- else:
- optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
-
- optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
- optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
- logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
- del pg0, pg1, pg2
-
- # Scheduler https://arxiv.org/pdf/1812.01187.pdf
- # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
- lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine
- scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
- # plot_lr_scheduler(optimizer, scheduler, epochs)
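- # lf(x) decays the lr multiplier from 1.0 at epoch 0 to hyp['lrf'] at the final epoch along a half-cosine;
- # LambdaLR multiplies each param group's initial lr by lf(epoch) on every scheduler.step()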
-
- # Resume
- start_epoch, best_fitness = 0, 0.0
- if pretrained:
- # Optimizer
- if ckpt['optimizer'] is not None:
- optimizer.load_state_dict(ckpt['optimizer'])
- best_fitness = ckpt['best_fitness']
-
- # Results
- if ckpt.get('training_results') is not None:
- with open(results_file, 'w') as file:
- file.write(ckpt['training_results']) # write results.txt
-
- # Epochs
- start_epoch = ckpt['epoch'] + 1
- if opt.resume:
- assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
- shutil.copytree(wdir, wdir.parent / f'weights_backup_epoch{start_epoch - 1}') # save previous weights
- if epochs < start_epoch:
- logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
- (weights, ckpt['epoch'], epochs))
- epochs += ckpt['epoch'] # finetune additional epochs
-
- del ckpt, state_dict
-
- # Image sizes
- gs = int(max(model.stride)) # grid size (max stride)
- imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
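- # e.g. with a max stride of 32, 640 passes unchanged while 641 would be adjusted to a 32-multiple with a warning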
-
- # DP mode
- if cuda and rank == -1 and torch.cuda.device_count() > 1:
- model = torch.nn.DataParallel(model)
-
- # SyncBatchNorm
- if opt.sync_bn and cuda and rank != -1:
- model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
- logger.info('Using SyncBatchNorm()')
-
- # Exponential moving average
- ema = ModelEMA(model) if rank in [-1, 0] else None
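- # ModelEMA keeps an exponential moving average of the weights; the EMA copy (ema.ema), not the raw
- # model, is what gets tested each epoch and saved into checkpoints below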
-
- # DDP mode
- if cuda and rank != -1:
- model = DDP(model, device_ids=[opt.local_rank], output_device=(opt.local_rank))
-
- # Trainloader
- dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
- hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
- world_size=opt.world_size, workers=opt.workers)
- mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
- nb = len(dataloader) # number of batches
- assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
-
- # Testloader
- if rank in [-1, 0]:
- ema.updates = start_epoch * nb // accumulate # set EMA updates
- testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,
- hyp=hyp, augment=False, cache=opt.cache_images, rect=True, rank=-1,
- world_size=opt.world_size, workers=opt.workers)[0] # only runs on process 0
-
- # Model parameters
- hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset
- model.nc = nc # attach number of classes to model
- model.hyp = hyp # attach hyperparameters to model
- model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
- model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
- model.names = names
-
- # Class frequency
- if rank in [-1, 0]:
- labels = np.concatenate(dataset.labels, 0)
- c = torch.tensor(labels[:, 0]) # classes
- # cf = torch.bincount(c.long(), minlength=nc) + 1.
- # model._initialize_biases(cf.to(device))
- plot_labels(labels, save_dir=log_dir)
- if tb_writer:
- # tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
- tb_writer.add_histogram('classes', c, 0)
-
- # Check anchors
- if not opt.noautoanchor:
- check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
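- # check_anchors compares label width/height distributions against the model's anchors and, if the
- # best-possible recall is too low, evolves better anchors (k-means plus genetic mutation) in place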
-
- # Start training
- t0 = time.time()
- nw = max(3 * nb, 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
- # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
- maps = np.zeros(nc) # mAP per class
- results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@0.5, mAP@0.5:0.95, val GIoU, val Objectness, val Classification
- scheduler.last_epoch = start_epoch - 1 # do not move
- scaler = amp.GradScaler(enabled=cuda)
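- # GradScaler scales the (possibly fp16) loss before backward() and unscales gradients inside
- # scaler.step(), skipping the optimizer step if inf/nan gradients appear; a no-op when enabled=False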
- logger.info('Image sizes %g train, %g test' % (imgsz, imgsz_test))
- logger.info('Using %g dataloader workers' % dataloader.num_workers)
- logger.info('Starting training for %g epochs...' % epochs)
- # torch.autograd.set_detect_anomaly(True)
- for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
- model.train()
-
- # Update image weights (optional)
- if opt.image_weights:
- # Generate indices
- if rank in [-1, 0]:
- cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
- iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
- dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
- # Broadcast if DDP
- if rank != -1:
- indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
- dist.broadcast(indices, 0)
- if rank != 0:
- dataset.indices = indices.cpu().numpy()
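- # rank 0 draws the mAP-weighted image sample, then broadcasts the indices so every DDP process
- # iterates the dataset in the same order this epoch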
-
- # Update mosaic border
- # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
- # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
-
- mloss = torch.zeros(4, device=device) # mean losses
- if rank != -1:
- dataloader.sampler.set_epoch(epoch)
- pbar = enumerate(dataloader)
- logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
- if rank in [-1, 0]:
- pbar = tqdm(pbar, total=nb) # progress bar
- optimizer.zero_grad()
- for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
- ni = i + nb * epoch # number integrated batches (since train start)
- imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
-
- # Warmup
- if ni <= nw:
- xi = [0, nw] # x interp
- # model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
- accumulate = max(1, int(np.interp(ni, xi, [1, nbs / total_batch_size]).round()))
- for j, x in enumerate(optimizer.param_groups):
- # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
- x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
- if 'momentum' in x:
- x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
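- # param_groups order is [pg0, pg1, pg2], so j == 2 above is the bias group; accumulate also ramps
- # from 1 up to the full nbs / total_batch_size schedule as warmup progresses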
-
- # Multi-scale
- if opt.multi_scale:
- sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size
- sf = sz / max(imgs.shape[2:]) # scale factor
- if sf != 1:
- ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
- imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
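- # with imgsz=640 and gs=32, sz is drawn from [320, 960] in 32-pixel steps and the whole batch is
- # resized in one bilinear pass; labels are normalized 0-1, so they need no rescaling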
-
- # Forward
- with amp.autocast(enabled=cuda):
- pred = model(imgs) # forward
- loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size
- if rank != -1:
- loss *= opt.world_size # gradient averaged between devices in DDP mode
-
- # Backward
- scaler.scale(loss).backward()
-
- # Optimize
- if ni % accumulate == 0:
- scaler.step(optimizer) # optimizer.step
- scaler.update()
- optimizer.zero_grad()
- if ema:
- ema.update(model)
-
- # Print
- if rank in [-1, 0]:
- mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
- mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
- s = ('%10s' * 2 + '%10.4g' * 6) % (
- '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
- pbar.set_description(s)
-
- # Plot
- if ni < 3:
- f = str(log_dir / ('train_batch%g.jpg' % ni)) # filename
- result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
- if tb_writer and result is not None:
- tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
- # tb_writer.add_graph(model, imgs) # add model to tensorboard
-
- # end batch ------------------------------------------------------------------------------------------------
-
- # Scheduler
- lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
- scheduler.step()
-
- # DDP process 0 or single-GPU
- if rank in [-1, 0]:
- # mAP
- if ema:
- ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
- final_epoch = epoch + 1 == epochs
- if not opt.notest or final_epoch: # Calculate mAP
- results, maps, times = test.test(opt.data,
- batch_size=total_batch_size,
- imgsz=imgsz_test,
- model=ema.ema,
- single_cls=opt.single_cls,
- dataloader=testloader,
- save_dir=log_dir)
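- # results = (P, R, mAP@0.5, mAP@0.5:0.95, val GIoU/obj/cls losses); maps holds per-class mAP and
- # feeds the --image-weights sampling at the top of the next epoch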
-
- # Write
- with open(results_file, 'a') as f:
- f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@0.5, mAP@0.5:0.95, val_losses=(GIoU, obj, cls)
- if len(opt.name) and opt.bucket:
- os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
-
- # Tensorboard
- if tb_writer:
- tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss', # train loss
- 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
- 'val/giou_loss', 'val/obj_loss', 'val/cls_loss', # val loss
- 'x/lr0', 'x/lr1', 'x/lr2'] # params
- for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
- tb_writer.add_scalar(tag, x, epoch)
-
- # Update best mAP
- fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP@0.5, mAP@0.5:0.95]
- if fi > best_fitness:
- best_fitness = fi
-
- # Save model
- save = (not opt.nosave) or (final_epoch and not opt.evolve)
- if save:
- with open(results_file, 'r') as f: # create checkpoint
- ckpt = {'epoch': epoch,
- 'best_fitness': best_fitness,
- 'training_results': f.read(),
- 'model': ema.ema,
- 'optimizer': None if final_epoch else optimizer.state_dict()}
-
- # Save last, best and delete
- torch.save(ckpt, last)
- if best_fitness == fi:
- torch.save(ckpt, best)
- del ckpt
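- # last.pt is overwritten every epoch and best.pt whenever fitness improves; the optimizer state is
- # dropped on the final epoch to roughly halve checkpoint size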
- # end epoch ----------------------------------------------------------------------------------------------------
- # end training
-
- if rank in [-1, 0]:
- # Strip optimizers
- n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
- fresults, flast, fbest = log_dir / f'results{n}.txt', wdir / f'last{n}.pt', wdir / f'best{n}.pt'
- for f1, f2 in zip([last, best, results_file], [flast, fbest, fresults]):
- if os.path.exists(f1):
- os.rename(f1, f2)  # rename
- if str(f2).endswith('.pt'):  # is *.pt
- strip_optimizer(f2)  # strip optimizer
- if opt.bucket:
- os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket))  # upload
- # Finish
- if not opt.evolve:
- plot_results(save_dir=log_dir) # save as results.png
- logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
-
- if rank not in [-1, 0]:
- dist.destroy_process_group()
- torch.cuda.empty_cache()
- return results
-
-
- if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
- parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
- parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
- parser.add_argument('--hyp', type=str, default='', help='hyperparameters path, i.e. data/hyp.scratch.yaml')
- parser.add_argument('--epochs', type=int, default=300)
- parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
- parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
- parser.add_argument('--rect', action='store_true', help='rectangular training')
- parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
- parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
- parser.add_argument('--notest', action='store_true', help='only test final epoch')
- parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
- parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
- parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
- parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
- parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
- parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
- parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
- parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
- parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
- parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
- parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
- parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
- opt = parser.parse_args()
-
- # Set DDP variables
- opt.total_batch_size = opt.batch_size
- opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
- opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
- set_logging(opt.global_rank)
- if opt.global_rank in [-1, 0]:
- check_git_status()
-
- # Resume
- if opt.resume: # resume an interrupted run
- ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
- log_dir = Path(ckpt).parent.parent # runs/exp0
- assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
- with open(log_dir / 'opt.yaml') as f:
- opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
- opt.cfg, opt.weights, opt.resume = '', ckpt, True
- logger.info('Resuming training from %s' % ckpt)
-
- else:
- opt.hyp = opt.hyp or ('data/hyp.finetune.yaml' if opt.weights else 'data/hyp.scratch.yaml')
- opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
- assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
- opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
- log_dir = increment_dir(Path(opt.logdir) / 'exp', opt.name) # runs/exp1
-
- device = select_device(opt.device, batch_size=opt.batch_size)
-
- # DDP mode
- if opt.local_rank != -1:
- assert torch.cuda.device_count() > opt.local_rank
- torch.cuda.set_device(opt.local_rank)
- device = torch.device('cuda', opt.local_rank)
- dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
- assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
- opt.batch_size = opt.total_batch_size // opt.world_size
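- # e.g. --batch-size 64 on 4 GPUs: world_size=4 and each DDP process trains with a local batch of 16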
-
- logger.info(opt)
- with open(opt.hyp) as f:
- hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
-
- # Train
- if not opt.evolve:
- tb_writer = None
- if opt.global_rank in [-1, 0]:
- logger.info('Start Tensorboard with "tensorboard --logdir %s", view at http://localhost:6006/' % opt.logdir)
- tb_writer = SummaryWriter(log_dir=log_dir) # runs/exp0
-
- train(hyp, opt, device, tb_writer)
-
- # Evolve hyperparameters (optional)
- else:
- # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
- meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
- 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
- 'momentum': (0.1, 0.6, 0.98), # SGD momentum/Adam beta1
- 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
- 'giou': (1, 0.02, 0.2), # GIoU loss gain
- 'cls': (1, 0.2, 4.0), # cls loss gain
- 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
- 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
- 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
- 'iou_t': (0, 0.1, 0.7), # IoU training threshold
- 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
- # 'anchors': (1, 2.0, 10.0), # anchors per output grid (0 to ignore)
- 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
- 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
- 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
- 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
- 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
- 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
- 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
- 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
- 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
- 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
- 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
- 'mixup': (1, 0.0, 1.0)} # image mixup (probability)
-
- assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
- opt.notest, opt.nosave = True, True # only test/save final epoch
- # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
- yaml_file = Path(opt.logdir) / 'evolve' / 'hyp_evolved.yaml'  # save best result here
- if opt.bucket:
- os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
-
- for _ in range(100): # generations to evolve
- if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate
- # Select parent(s)
- parent = 'single' # parent selection method: 'single' or 'weighted'
- x = np.loadtxt('evolve.txt', ndmin=2)
- n = min(5, len(x)) # number of previous results to consider
- x = x[np.argsort(-fitness(x))][:n] # top n mutations
- w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum must be > 0 for random.choices)
- if parent == 'single' or len(x) == 1:
- # x = x[random.randint(0, n - 1)] # random selection
- x = x[random.choices(range(n), weights=w)[0]] # weighted selection
- elif parent == 'weighted':
- x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
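- # 'single' picks one of the top-n prior results at random, weighted by fitness margin;
- # 'weighted' instead blends all top-n results into a single parent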
-
- # Mutate
- mp, s = 0.9, 0.2 # mutation probability, sigma
- npr = np.random
- npr.seed(int(time.time()))
- g = np.array([x[0] for x in meta.values()]) # gains 0-1
- ng = len(meta)
- v = np.ones(ng)
- while all(v == 1): # mutate until a change occurs (prevent duplicates)
- v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
- for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
- hyp[k] = float(x[i + 7] * v[i]) # mutate
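- # evolve.txt rows are [7 result columns, then hyp values in key order], hence the i + 7 offset;
- # genes with a meta gain of 0 (e.g. fl_gamma) are never mutated, others get a factor clipped to [0.3, 3.0]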
-
- # Constrain to limits
- for k, (_, v_min, v_max) in meta.items():
- hyp[k] = max(hyp[k], v_min)  # lower limit
- hyp[k] = min(hyp[k], v_max)  # upper limit
- hyp[k] = round(hyp[k], 5)  # significant digits
-
- # Train mutation
- results = train(hyp.copy(), opt, device)
-
- # Write mutation results
- print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
-
- # Plot results
- plot_evolution(yaml_file)
- print('Hyperparameter evolution complete. Best results saved as: %s\nCommand to train a new model with these '
- 'hyperparameters: $ python train.py --hyp %s' % (yaml_file, yaml_file))