浏览代码

Move hyp and opt yaml save to top of train()

Fixes a bug where scaled values were saved in hyp.yaml, which would cause continuity issues with --resume
5.0
Alex Stoken GitHub 4 年前
父节点
当前提交
9d631408a2
找不到此签名对应的密钥 GPG 密钥 ID: 4AEE18F83AFDEB23
共有 1 个文件被更改,包括 6 次插入和 6 次删除
  1. +6
    -6
      train.py

+ 6
- 6
train.py 查看文件

@@ -52,6 +52,12 @@ def train(hyp):
best = wdir + 'best.pt'
results_file = log_dir + os.sep + 'results.txt'

# Save run settings
with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / 'opt.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)
epochs = opt.epochs # 300
batch_size = opt.batch_size # 64
weights = opt.weights # initial training weights
@@ -171,12 +177,6 @@ def train(hyp):
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
model.names = data_dict['names']

# Save run settings
with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / 'opt.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)

# Class frequency
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes

正在加载...
取消
保存