DDP after autoanchor reorder (#2421)
This commit is contained in:
parent
d5ca8ca34e
commit
886f1c03d8
8
train.py
8
train.py
|
|
@@ -181,10 +181,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
|
||||||
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
|
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
|
||||||
logger.info('Using SyncBatchNorm()')
|
logger.info('Using SyncBatchNorm()')
|
||||||
|
|
||||||
# DDP mode
|
|
||||||
if cuda and rank != -1:
|
|
||||||
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
|
|
||||||
|
|
||||||
# Trainloader
|
# Trainloader
|
||||||
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
|
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
|
||||||
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
|
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
|
||||||
|
|
@@ -216,6 +212,10 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
|
||||||
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
|
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
|
||||||
model.half().float() # pre-reduce anchor precision
|
model.half().float() # pre-reduce anchor precision
|
||||||
|
|
||||||
|
# DDP mode
|
||||||
|
if cuda and rank != -1:
|
||||||
|
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
|
||||||
|
|
||||||
# Model parameters
|
# Model parameters
|
||||||
hyp['box'] *= 3. / nl # scale to layers
|
hyp['box'] *= 3. / nl # scale to layers
|
||||||
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
|
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue