Uncomment plot_lr_scheduler in train() and pass log_dir as save location
@@ -148,7 +148,7 @@ def train(hyp):
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
     scheduler.last_epoch = start_epoch - 1  # do not move
     # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
-    # plot_lr_scheduler(optimizer, scheduler, epochs)
+    plot_lr_scheduler(optimizer, scheduler, epochs, save_dir=log_dir)

     # Initialize distributed training
     if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
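For context, `log_dir` is the run's logging directory created earlier in `train()`. A minimal sketch of one common way such a directory is obtained (the `SummaryWriter` usage shown here is an assumption for illustration, not part of this diff):

```python
# Sketch (assumption): derive log_dir from the TensorBoard writer set up in train().
from torch.utils.tensorboard import SummaryWriter

tb_writer = SummaryWriter()    # defaults to a runs/<timestamp>-<hostname> directory
log_dir = tb_writer.log_dir    # directory passed as save_dir to plot_lr_scheduler()
```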
@@ -1005,7 +1005,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
     return mosaic


-def plot_lr_scheduler(optimizer, scheduler, epochs=300):
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
     # Plot LR simulating training for full epochs
     optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
     y = []
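The hunk above is truncated before the plotting and saving code. A minimal sketch of how the remainder of `plot_lr_scheduler` might apply the new `save_dir` parameter, assuming the figure is written as `LR.png` (the filename and plot styling are assumptions, not shown in this diff):

```python
import os
from copy import copy

import matplotlib.pyplot as plt


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()                              # advance the schedule one epoch
        y.append(optimizer.param_groups[0]['lr'])     # record the resulting learning rate
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(bottom=0)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'LR.png'), dpi=200)  # saved into save_dir instead of the cwd
```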