From 490f1e7b9c46f1e3fd04fe52cd3025eab0844788 Mon Sep 17 00:00:00 2001
From: Alex Stoken
Date: Tue, 16 Jun 2020 15:13:03 -0500
Subject: [PATCH] add save_dir arg to plot_lr_scheduler, default to current
 dir. Uncomment plot_lr_scheduler in train() and pass log_dir as save location

---
 train.py       | 2 +-
 utils/utils.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index 4fa9005..df5e1ed 100644
--- a/train.py
+++ b/train.py
@@ -148,7 +148,7 @@ def train(hyp):
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
     scheduler.last_epoch = start_epoch - 1  # do not move
     # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
-    # plot_lr_scheduler(optimizer, scheduler, epochs)
+    plot_lr_scheduler(optimizer, scheduler, epochs, save_dir = log_dir)
 
     # Initialize distributed training
     if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
diff --git a/utils/utils.py b/utils/utils.py
index 95d1198..8ac73e3 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -1005,7 +1005,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
     return mosaic
 
 
-def plot_lr_scheduler(optimizer, scheduler, epochs=300):
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
     # Plot LR simulating training for full epochs
     optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
     y = []
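Note: the second hunk shows only the head of plot_lr_scheduler, so the diff
does not reveal where save_dir is actually consumed. Below is a minimal sketch
of how the rest of the function might use the new argument; the matplotlib
plotting body and the 'LR.png' output filename are illustrative assumptions,
not part of this patch:

    import os
    from copy import copy

    import matplotlib.pyplot as plt


    def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
        # Plot LR simulating training for full epochs
        optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
        y = []
        for _ in range(epochs):
            scheduler.step()                           # advance the schedule by one epoch
            y.append(optimizer.param_groups[0]['lr'])  # record the LR after the step
        plt.plot(y, '.-', label='LR')
        plt.xlabel('epoch')
        plt.ylabel('LR')
        plt.xlim(0, epochs)
        plt.tight_layout()
        # Hypothetical output name; the point of this patch is that the figure
        # is written under save_dir rather than the current working directory.
        plt.savefig(os.path.join(save_dir, 'LR.png'), dpi=200)

With this change, train() calls plot_lr_scheduler(optimizer, scheduler, epochs,
save_dir=log_dir), so the simulated LR curve lands in the run's log directory
instead of wherever the training script happens to be launched from.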