Add callbacks (#7315)
* Add `on_train_start()` callback
* Update
* Update
parent 32661f75ac · commit 245d6459a9
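For orientation: every `callbacks.run(...)` call added below goes through the callbacks object that is passed into `train()` and `val.run()`. The sketch below is a minimal stand-in for such a registry, written only to make the later examples concrete; the `register_action()` / `run()` method names follow the pattern of this repository's callback helper, but the body is an illustrative reimplementation, not the project's code.

# Minimal callback-registry sketch (illustrative stand-in, not the repository's implementation)
class Callbacks:
    def __init__(self):
        # one handler list per hook fired by train.py / val.py in this change
        self._hooks = {k: [] for k in (
            'on_pretrain_routine_start', 'on_train_start', 'on_train_epoch_start',
            'on_train_batch_start', 'on_val_start', 'on_val_batch_start', 'on_val_batch_end')}

    def register_action(self, hook, name='', callback=None):
        # attach one named callable to one hook
        assert hook in self._hooks, f"unknown hook '{hook}'"
        assert callable(callback), 'callback must be callable'
        self._hooks[hook].append({'name': name, 'callback': callback})

    def run(self, hook, *args, **kwargs):
        # fire every handler registered for the hook, in registration order
        for action in self._hooks.get(hook, []):
            action['callback'](*args, **kwargs)


if __name__ == '__main__':
    callbacks = Callbacks()
    callbacks.register_action('on_train_start', name='banner', callback=lambda: print('training started'))
    callbacks.run('on_train_start')  # -> training started

The later sketches on this page reuse this stand-in, so their registration calls illustrate the hook names added by this commit rather than documenting the real helper's API. In total the diffs below add seven dispatch points (four in train.py, three in val.py) plus a no-op `on_train_start()` stub on the `Loggers` class.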
train.py (4 additions)
@@ -66,6 +66,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
     save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
         Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
         opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
+    callbacks.run('on_pretrain_routine_start')

     # Directories
     w = save_dir / 'weights'  # weights dir
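With `on_pretrain_routine_start` now fired at the top of `train()` (right after the options are unpacked, before directories and loggers are set up), a caller that builds its own callbacks object can attach setup work to that hook before training starts. A minimal sketch, reusing the `Callbacks` stand-in from above; the handler name and message are made up for illustration:

def announce_pretrain():
    # runs once, before train() creates directories and loads hyperparameters
    print('pretrain routine starting')

callbacks = Callbacks()  # stand-in registry sketched above; not the repository's helper
callbacks.register_action('on_pretrain_routine_start', name='announce', callback=announce_pretrain)
callbacks.run('on_pretrain_routine_start')  # the call train() now makes at this point in the diff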
@@ -291,11 +292,13 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
     scaler = amp.GradScaler(enabled=cuda)
     stopper = EarlyStopping(patience=opt.patience)
     compute_loss = ComputeLoss(model)  # init loss class
+    callbacks.run('on_train_start')
     LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                 f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                 f"Logging results to {colorstr('bold', save_dir)}\n"
                 f'Starting training for {epochs} epochs...')
     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
+        callbacks.run('on_train_epoch_start')
         model.train()

         # Update image weights (optional, single-GPU only)
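`on_train_start` fires once, just before the epoch loop, and `on_train_epoch_start` fires at the top of every epoch, so the pair is a natural place for simple bookkeeping such as epoch timing. A hedged sketch, again on the stand-in registry; the timer class below is hypothetical and not part of this commit:

import time

class EpochTimer:
    # hypothetical handler object: records when training and each epoch begin
    def __init__(self):
        self.train_start = None
        self.epoch_starts = []

    def on_train_start(self):
        self.train_start = time.time()

    def on_train_epoch_start(self):
        self.epoch_starts.append(time.time())

timer = EpochTimer()
callbacks = Callbacks()  # stand-in registry from the first sketch
callbacks.register_action('on_train_start', name='epoch_timer', callback=timer.on_train_start)
callbacks.register_action('on_train_epoch_start', name='epoch_timer', callback=timer.on_train_epoch_start)

# simulate the order in which train() now fires these hooks
callbacks.run('on_train_start')
for epoch in range(3):
    callbacks.run('on_train_epoch_start')
    time.sleep(0.01)  # stands in for one epoch of work
print(len(timer.epoch_starts))  # 3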
@@ -317,6 +320,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
         pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
+            callbacks.run('on_train_batch_start')
             ni = i + nb * epoch  # number integrated batches (since train start)
             imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

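Note that `on_train_batch_start` is dispatched here with no arguments, so a handler cannot see the batch tensors; anything it needs (step counts, timers) has to live in its own state. A small sketch under that constraint, reusing the stand-in registry; the counter class is hypothetical:

class BatchCounter:
    # keeps its own state because the hook passes no batch data
    def __init__(self):
        self.batches_seen = 0

    def on_train_batch_start(self):
        self.batches_seen += 1

counter = BatchCounter()
callbacks = Callbacks()  # stand-in registry from the first sketch
callbacks.register_action('on_train_batch_start', name='batch_counter', callback=counter.on_train_batch_start)

for _ in range(5):  # stands in for the per-batch loop in train()
    callbacks.run('on_train_batch_start')
print(counter.batches_seen)  # 5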
@@ -84,6 +84,10 @@ class Loggers():
         else:
             self.wandb = None

+    def on_train_start(self):
+        # Callback runs on train start
+        pass
+
     def on_pretrain_routine_end(self):
         # Callback runs on pre-train routine end
         paths = self.save_dir.glob('*labels*.jpg')  # training labels
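The `Loggers` class gains a matching `on_train_start()` stub so the new hook has a logger-side target; in this change the stub is a no-op. The sketch below shows how a custom logger-like object might fill in such a method; the class, its behavior, and the save_dir value are hypothetical, not part of the commit:

class MyLogger:
    # hypothetical logger that implements only the hooks it cares about
    def __init__(self, save_dir):
        self.save_dir = save_dir

    def on_train_start(self):
        # e.g. announce where results will be written as training begins
        print(f'logging results to {self.save_dir}')

    def on_pretrain_routine_end(self):
        pass  # nothing to do in this sketch

logger = MyLogger(save_dir='runs/train/exp')  # illustrative path
callbacks = Callbacks()  # stand-in registry from the first sketch
callbacks.register_action('on_train_start', name='my_logger', callback=logger.on_train_start)
callbacks.run('on_train_start')  # -> logging results to runs/train/exp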
val.py (4 additions)
@@ -188,8 +188,10 @@ def run(
     dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
+    callbacks.run('on_val_start')
     pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
+        callbacks.run('on_val_batch_start')
         t1 = time_sync()
         if cuda:
             im = im.to(device, non_blocking=True)
@@ -260,6 +262,8 @@ def run(
             f = save_dir / f'val_batch{batch_i}_pred.jpg'  # predictions
             Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()

+        callbacks.run('on_val_batch_end')
+
     # Compute metrics
     stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
     if len(stats) and stats[0].any():
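`on_val_batch_start` and `on_val_batch_end` now bracket each validation batch, so pairing them gives per-batch wall-clock timing without touching val.py itself. A hedged sketch with a hypothetical timer handler, again on the stand-in registry:

import time

class ValBatchTimer:
    # hypothetical handler: measures wall-clock time between the start and end hooks
    def __init__(self):
        self._t0 = None
        self.batch_times = []

    def start(self):
        self._t0 = time.time()

    def end(self):
        self.batch_times.append(time.time() - self._t0)

timer = ValBatchTimer()
callbacks = Callbacks()  # stand-in registry from the first sketch
callbacks.register_action('on_val_batch_start', name='val_timer', callback=timer.start)
callbacks.register_action('on_val_batch_end', name='val_timer', callback=timer.end)

# simulate the per-batch hook order val.run() now follows
for _ in range(3):
    callbacks.run('on_val_batch_start')
    time.sleep(0.01)  # stands in for inference + NMS on one batch
    callbacks.run('on_val_batch_end')
print(f'mean batch time: {sum(timer.batch_times) / len(timer.batch_times):.3f} s')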