Browse Source

EMA and non_blocking=True

5.0
Glenn Jocher 4 years ago
parent
commit
a1c8406af3
2 changed files with 3 additions and 3 deletions
  1. +1
    -1
      test.py
  2. +2
    -2
      train.py

+ 1
- 1
test.py View File

@@ -69,7 +69,7 @@ def test(data,
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
-img = img.to(device)
+img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)

+ 2
- 2
train.py View File

@@ -193,7 +193,7 @@ def train(hyp):
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

# Exponential moving average
-ema = torch_utils.ModelEMA(model, updates=start_epoch * nb / accumulate)
+ema = torch_utils.ModelEMA(model)

# Start training
t0 = time.time()
@@ -223,7 +223,7 @@ def train(hyp):
pbar = tqdm(enumerate(dataloader), total=nb) # progress bar
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
-imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
+imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0

# Warmup
if ni <= nw:

Loading…
Cancel
Save