* Allow config change * Allow val change in wandb config * Don't resume transfer learning runs * Add entity in log dataset * Modify Dataloader
@@ -89,6 +89,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary | |||
# W&B | |||
opt.hyp = hyp # add hyperparameters | |||
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None | |||
run_id = run_id if opt.resume else None # start fresh run if transfer learning | |||
wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) | |||
loggers['wandb'] = wandb_logger.wandb | |||
if loggers['wandb']: |
@@ -18,6 +18,8 @@ if __name__ == '__main__': | |||
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') | |||
parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') | |||
parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') | |||
parser.add_argument('--entity', default=None, help='W&B entity') | |||
opt = parser.parse_args() | |||
opt.resume = False # Explicitly disallow resume check for dataset upload job | |||
@@ -126,8 +126,7 @@ class WandbLogger(): | |||
if not opt.resume: | |||
wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict | |||
# Info useful for resuming from artifacts | |||
self.wandb_run.config.opt = vars(opt) | |||
self.wandb_run.config.data_dict = wandb_data_dict | |||
self.wandb_run.config.update({'opt': vars(opt), 'data_dict': data_dict}, allow_val_change=True) | |||
self.data_dict = self.setup_training(opt, data_dict) | |||
if self.job_type == 'Dataset Creation': | |||
self.data_dict = self.check_and_upload_dataset(opt) |