From 3a56cac41455308b4a3441043b4c14805a502e2e Mon Sep 17 00:00:00 2001
From: NanoCode012
Date: Fri, 15 Jan 2021 00:53:13 +0700
Subject: [PATCH] Fix batch-size on resume for multi-gpu (#1942)

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 459c81c..9aff01a 100644
--- a/train.py
+++ b/train.py
@@ -477,7 +477,7 @@ if __name__ == '__main__':
         apriori = opt.global_rank, opt.local_rank
         with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
             opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
-        opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori  # reinstate
+        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
         logger.info('Resuming training from %s' % ckpt)
     else:
         # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
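
For context, a minimal standalone sketch of the resume flow this change touches (the 2-GPU world size and the batch values are illustrative assumptions, not taken from the patch). The opt.yaml written during a DDP run stores the already-divided per-GPU batch size, so reinstating opt.batch_size from opt.total_batch_size lets the resumed run start from the total and re-divide it across however many GPUs it actually uses:

    import argparse

    # Illustrative opt.yaml contents saved mid-run by a 2-GPU DDP session:
    # batch_size was already divided by world_size before being written out.
    saved_opt = {'batch_size': 32, 'total_batch_size': 64}
    world_size = 2  # DDP process count of the resumed run (assumed for this sketch)

    opt = argparse.Namespace(**saved_opt)  # replace, as train.py does on --resume
    opt.batch_size = opt.total_batch_size  # reinstate (this patch): 64, not 32

    # Later DDP setup divides the total across the current GPUs:
    assert opt.batch_size % world_size == 0
    opt.batch_size = opt.total_batch_size // world_size
    print(opt.batch_size)  # 32 per GPU, 64 effective total

    # Without the reinstate, a resume outside DDP (world_size == 1) would keep
    # the per-GPU value 32 as if it were the total, halving the effective batch.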