|
|
@@ -266,7 +266,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     stopper = EarlyStopping(patience=opt.patience)
     compute_loss = ComputeLoss(model)  # init loss class
     LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
-                f'Using {train_loader.num_workers} dataloader workers\n'
+                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                 f"Logging results to {colorstr('bold', save_dir)}\n"
                 f'Starting training for {epochs} epochs...')
     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
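Note on the hunk above: under DDP each RANK builds its own dataloader, so the old log line reported only the per-process worker count. A minimal sketch of the corrected arithmetic, assuming the environment-variable lookup train.py uses for WORLD_SIZE and a hypothetical stand-in value for train_loader.num_workers:

import os

# How train.py reads the DDP world size (defaults to 1 for single-GPU/CPU runs).
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))

num_workers = 8  # hypothetical stand-in for train_loader.num_workers (per RANK)
print(f'Using {num_workers * WORLD_SIZE} dataloader workers')
# single process (WORLD_SIZE=1): Using 8 dataloader workers
# 4-GPU DDP run (WORLD_SIZE=4):  Using 32 dataloader workers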
|
|
@@ -460,7 +460,7 @@ def parse_opt(known=False):
     parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
     parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
     parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
-    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
     parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
     parser.add_argument('--name', default='exp', help='save to project/name')
     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
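To illustrate the clarified help text in the second hunk, a minimal standalone parser carrying only the changed flag (a hypothetical sketch, not the full parse_opt()):

import argparse

# --workers is consumed per process; each DDP RANK spawns this many workers.
parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, default=8,
                    help='max dataloader workers (per RANK in DDP mode)')
opt = parser.parse_args(['--workers', '4'])
print(opt.workers)  # 4 per RANK; a 2-GPU DDP run would spawn 2 * 4 = 8 in total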