@@ -60,7 +60,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
           device,
           callbacks
           ):
-    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \
+    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
         Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
         opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
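The only change in this first hunk is dropping the stray trailing comma from the unpacking target list. A trailing comma there is legal Python (the left-hand side is still a tuple target), so the fix is cosmetic rather than behavioral; a minimal sketch of the equivalence:

```python
# Both forms unpack identically; the trailing comma in the first
# target list is valid syntax but reads like a typo.
a, b, = 1, 2  # trailing comma in the target tuple - still legal
c, d = 1, 2   # the cleaned-up form used after this change
assert (a, b) == (c, d) == (1, 2)
```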
@@ -124,7 +124,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create

     # Freeze
-    freeze = [f'model.{x}.' for x in (freeze if isinstance(freeze, list) else range(freeze))]  # layers to freeze
+    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
     for k, v in model.named_parameters():
         v.requires_grad = True  # train all layers
         if any(x in k for x in freeze):
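The rewritten comprehension assumes `freeze` is always a list, which the argparse default fixed in the hunk below guarantees: a single value means "freeze the first N layers", while several values name explicit layer indices. A minimal standalone sketch of that selection logic, with `layers_to_freeze` as a hypothetical helper name used only for illustration:

```python
def layers_to_freeze(freeze):
    # A single value [N] freezes layers 0..N-1; several values freeze
    # exactly the listed layer indices. The default [0] yields range(0),
    # i.e. an empty selection, so no layers are frozen.
    return [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]

assert layers_to_freeze([0]) == []                                    # default: train all layers
assert layers_to_freeze([3]) == ['model.0.', 'model.1.', 'model.2.']  # first 3 layers
assert layers_to_freeze([0, 1, 2]) == ['model.0.', 'model.1.', 'model.2.']  # same, listed explicitly
```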
@@ -469,7 +469,7 @@ def parse_opt(known=False):
     parser.add_argument('--linear-lr', action='store_true', help='linear LR')
     parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
     parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
-    parser.add_argument('--freeze', nargs='+', type=int, default=0, help='Freeze layers: backbone=10, first3=0 1 2')
+    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
     parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
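The matching argparse change keeps the default consistent with the new list-based logic: with `nargs='+'`, argparse collects command-line values into a list but passes the default through verbatim, and the rewritten comprehension calls `len(freeze)` and indexes `freeze[0]`, both of which need a list. A minimal sketch of the parsing behavior:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--freeze', nargs='+', type=int, default=[0])

assert parser.parse_args([]).freeze == [0]                   # default stays a list
assert parser.parse_args(['--freeze', '10']).freeze == [10]  # backbone
assert parser.parse_args(['--freeze', '0', '1', '2']).freeze == [0, 1, 2]
# With default=0, parse_args([]).freeze would be the bare int 0, and the
# new len(freeze) / freeze[0] logic would raise a TypeError whenever
# --freeze was omitted from the command line.
```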