
Move hyp and opt yaml save to top of train()

Fixes a bug where scaled hyperparameter values were saved in hyp.yaml, causing continuity issues with --resume
Alex Stoken · 4 years ago
commit 9d631408a2
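The failure mode in brief: train() rescales some hyp values in place, so a dump placed after that point writes the scaled numbers, and a resumed run that loads them back scales them again. A minimal sketch of the pattern (the weight_decay scaling and its values are illustrative assumptions, not the exact train.py code):

import yaml

def train(hyp, batch_size, log_dir='.'):
    # Hypothetical in-place scaling of the kind train() performs
    hyp['weight_decay'] *= batch_size / 64  # hyp now holds a scaled value

    # Old placement: dumping after the scaling persists the scaled value;
    # a --resume run that feeds hyp.yaml back into train() scales it again.
    with open(f'{log_dir}/hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)

train({'weight_decay': 0.0005}, batch_size=128)
# hyp.yaml now reads weight_decay: 0.001 rather than the original 0.0005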
1 changed file with 6 additions and 6 deletions

train.py (+6, −6)

 best = wdir + 'best.pt'
 results_file = log_dir + os.sep + 'results.txt'

+# Save run settings
+with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
+    yaml.dump(hyp, f, sort_keys=False)
+with open(Path(log_dir) / 'opt.yaml', 'w') as f:
+    yaml.dump(vars(opt), f, sort_keys=False)
+
 epochs = opt.epochs  # 300
 batch_size = opt.batch_size  # 64
 weights = opt.weights  # initial training weights
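A quick aside on vars(opt) in the added lines above: opt is an argparse.Namespace, which PyYAML has no representer for, so it is converted to a plain dict before dumping. A self-contained check (the option values are illustrative):

import argparse
import yaml

opt = argparse.Namespace(epochs=300, batch_size=64, weights='yolov5s.pt')

print(yaml.dump(vars(opt), sort_keys=False))
# epochs: 300
# batch_size: 64
# weights: yolov5s.pt

# yaml.dump(opt) would raise yaml.representer.RepresenterError,
# since Namespace is not a type PyYAML knows how to serialize.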
 model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
 model.names = data_dict['names']

-# Save run settings
-with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
-    yaml.dump(hyp, f, sort_keys=False)
-with open(Path(log_dir) / 'opt.yaml', 'w') as f:
-    yaml.dump(vars(opt), f, sort_keys=False)
-
 # Class frequency
 labels = np.concatenate(dataset.labels, 0)
 c = torch.tensor(labels[:, 0])  # classes
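With the dump moved ahead of any in-place mutation, a restarted run can read back the settings the original run started from. A minimal sketch of that round-trip, assuming a hypothetical run directory; the diff itself does not show how --resume consumes these files, so the reload below is illustrative, not this version's resume logic:

import yaml
from pathlib import Path

log_dir = Path('runs/exp0')  # hypothetical run directory

# hyp.yaml/opt.yaml were written before train() scaled anything,
# so these are the values the original run started from.
hyp = yaml.safe_load((log_dir / 'hyp.yaml').read_text())
opt = yaml.safe_load((log_dir / 'opt.yaml').read_text())

# Feeding them back into train() re-applies the scaling exactly once,
# keeping a resumed run consistent with the original.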
