
add save_dir arg to plot_lr_scheduler, defaulting to the current directory.

Uncomment the plot_lr_scheduler call in train() and pass log_dir as the save location
Alex Stoken · 4 years ago · commit 490f1e7b9c
2 changed files with 2 additions and 2 deletions:
  1. train.py (+1 -1)
  2. utils/utils.py (+1 -1)

train.py (+1 -1)

@@ -148,7 +148,7 @@ def train(hyp):
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
     scheduler.last_epoch = start_epoch - 1  # do not move
     # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
-    # plot_lr_scheduler(optimizer, scheduler, epochs)
+    plot_lr_scheduler(optimizer, scheduler, epochs, save_dir=log_dir)
 
     # Initialize distributed training
     if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
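
This hunk sits in the scheduler setup of train(). A minimal, self-contained sketch of that flow follows; the toy model, the exact lf lambda, and the log_dir value are assumptions for illustration, not the repository's code:

import math
import torch.nn as nn
from torch.optim import SGD, lr_scheduler

model = nn.Linear(10, 1)                      # stand-in for the detection model
optimizer = SGD(model.parameters(), lr=0.01)

epochs, start_epoch = 300, 0
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * 0.9 + 0.1  # assumed cosine shape
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
scheduler.last_epoch = start_epoch - 1        # align a resumed run with its schedule

log_dir = 'runs/exp0'                         # hypothetical save location
# with this commit the LR curve is actually plotted and written into log_dir:
# plot_lr_scheduler(optimizer, scheduler, epochs, save_dir=log_dir)

Setting last_epoch to start_epoch - 1 before training is what makes a resumed run pick the schedule up at the right epoch; see the PyTorch forum link in the hunk.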

utils/utils.py (+1 -1)

@@ -1005,7 +1005,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
     return mosaic
 
 
-def plot_lr_scheduler(optimizer, scheduler, epochs=300):
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
     # Plot LR simulating training for full epochs
     optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
     y = []
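
Only the body lines above are confirmed by the diff. A plausible continuation of the function under its new signature; the simulation loop, the matplotlib calls, and the LR.png file name are assumptions:

import os
from copy import copy
import matplotlib.pyplot as plt

def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()                           # advance one simulated epoch
        y.append(optimizer.param_groups[0]['lr'])  # record the LR after the step
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.xlim(0, epochs)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'LR.png'), dpi=200)  # lands in save_dir, './' by default

Passing save_dir=log_dir from train() then drops the plot next to the rest of the run's artifacts instead of into the working directory.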
