@@ -66,6 +66,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
    callbacks.run('on_pretrain_routine_start')

    # Directories
    w = save_dir / 'weights'  # weights dir
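The callbacks.run(...) calls dispatch a named hook to every handler registered for it. A minimal sketch of that pattern, using a simplified stand-in class rather than YOLOv5's actual Callbacks implementation:

# Sketch of the hook-dispatch pattern behind callbacks.run(...).
# Simplified stand-in, not YOLOv5's actual utils/callbacks.py.
from collections import defaultdict

class Callbacks:
    def __init__(self):
        self._hooks = defaultdict(list)  # hook name -> registered handlers

    def register_action(self, hook, callback):
        self._hooks[hook].append(callback)

    def run(self, hook, *args, **kwargs):
        for cb in self._hooks[hook]:  # no-op if nothing is registered
            cb(*args, **kwargs)

callbacks = Callbacks()
callbacks.register_action('on_pretrain_routine_start', lambda: print('pretrain start'))
callbacks.run('on_pretrain_routine_start')  # prints 'pretrain start'
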
@@ -291,11 +292,13 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
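    # GradScaler scales the loss so fp16 gradients do not underflow under AMP;
    # with enabled=False (no CUDA), its scale/step/update calls are pass-throughs.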
    scaler = amp.GradScaler(enabled=cuda)
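    # Stop training early when validation fitness has not improved
    # for opt.patience consecutive epochs.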
    stopper = EarlyStopping(patience=opt.patience)
    compute_loss = ComputeLoss(model)  # init loss class
    callbacks.run('on_train_start')
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
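When image weighting is enabled, the dataset's sampling indices are recomputed each epoch so that images containing poorly performing classes are drawn more often. A self-contained sketch of the idea; the names sample_weighted_indices and class_errors are illustrative, not YOLOv5's API:

# Sketch: epoch-level weighted resampling of image indices.
# labels[i] holds the class ids present in image i; class_errors is a
# hypothetical per-class error signal (e.g. 1 - per-class mAP).
import numpy as np

def sample_weighted_indices(labels, class_errors, n_images):
    # weight each image by the summed error of the classes it contains
    weights = np.array([class_errors[lbl].sum() + 1e-6 for lbl in labels])
    return np.random.choice(n_images, size=n_images, p=weights / weights.sum())

labels = [np.array([0, 2]), np.array([1]), np.array([2, 2])]  # class ids per image
class_errors = np.array([0.1, 0.5, 0.9])  # worse classes get higher weight
indices = sample_weighted_indices(labels, class_errors, len(labels))
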
@@ -317,6 +320,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
            pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
        optimizer.zero_grad()
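        # Gradients are zeroed once here, then accumulated across batches; the
        # rest of the loop (outside this hunk) steps the optimizer and re-zeroes
        # only every `accumulate` batches to simulate a larger nominal batch size.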
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0
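
Downstream of this hunk, the forward/backward pass follows PyTorch's standard AMP recipe with the scaler created above. A minimal runnable sketch of that recipe, using a toy model and loss rather than the YOLOv5 graph:

# Minimal AMP training step with GradScaler (toy model and loss, not YOLOv5's).
import torch
from torch.cuda import amp

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cuda = device.type == 'cuda'
model = torch.nn.Linear(8, 2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = amp.GradScaler(enabled=cuda)

imgs = torch.rand(4, 8, device=device)
targets = torch.randint(0, 2, (4,), device=device)

optimizer.zero_grad()
with amp.autocast(enabled=cuda):  # run the forward pass in fp16 where safe
    loss = torch.nn.functional.cross_entropy(model(imgs), targets)
scaler.scale(loss).backward()  # backprop the scaled loss
scaler.step(optimizer)  # unscales grads; skips the step on inf/nan
scaler.update()  # adjust the scale factor for the next iteration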