* Update torch_utils.py
* Additional code refactoring
* tuples to sets
* Cleanup
* modify Dataloader
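The changes below are mostly mechanical applications of two patterns. A minimal sketch of both, using throwaway names rather than the project's own variables:

```python
# 1) Membership tests against literal collections: a set literal is the idiomatic
#    container for `in` checks, and CPython folds constant sets into a frozenset,
#    so the test is a hash lookup rather than a linear scan.
RANK = -1  # hypothetical stand-in for the env-derived rank used in train.py
if RANK in {-1, 0}:        # preferred over `in (-1, 0)` or `in [-1, 0]`
    print('main process')

# 2) String building: f-strings replace `+` concatenation.
txt_path = 'predictions'   # hypothetical stem
with open(f'{txt_path}.txt', 'a') as f:   # instead of txt_path + '.txt'
    f.write('0 0.5 0.5 0.1 0.1\n')
```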
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
-with open(txt_path + '.txt', 'a') as f:
+with open(f'{txt_path}.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
annotator.box_label(xyxy, label, color=colors(c, True))
if save_crop:
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
# Stream results
im0 = annotator.result()
import openvino.inference_engine as ie
LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-f = str(file).replace('.pt', '_openvino_model' + os.sep)
+f = str(file).replace('.pt', f'_openvino_model{os.sep}')
cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
subprocess.check_output(cmd, shell=True)
cmd = 'edgetpu_compiler --version'
help_url = 'https://coral.ai/docs/edgetpu/compiler/'
assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
-if subprocess.run(cmd + ' >/dev/null', shell=True).returncode != 0:
+if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
for c in (
LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
f = str(file).replace('.pt', '_web_model') # js dir
f_pb = file.with_suffix('.pb') # *.pb path
-f_json = f + '/model.json' # *.json path
+f_json = f'{f}/model.json' # *.json path
cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}'
# Loggers
data_dict = None
-if RANK in [-1, 0]:
+if RANK in {-1, 0}:
loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
if loggers.wandb:
data_dict = loggers.wandb.data_dict
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
-ema = ModelEMA(model) if RANK in [-1, 0] else None
+ema = ModelEMA(model) if RANK in {-1, 0} else None
# Resume
start_epoch, best_fitness = 0, 0.0
assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
# Process 0
-if RANK in [-1, 0]:
+if RANK in {-1, 0}:
val_loader = create_dataloader(val_path,
imgsz,
batch_size // WORLD_SIZE * 2,
train_loader.sampler.set_epoch(epoch)
pbar = enumerate(train_loader)
LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
-if RANK in (-1, 0):
+if RANK in {-1, 0}:
pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
last_opt_step = ni
# Log
-if RANK in (-1, 0):
+if RANK in {-1, 0}:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
pbar.set_description(('%10s' * 2 + '%10.4g' * 5) %
lr = [x['lr'] for x in optimizer.param_groups] # for loggers
scheduler.step()
-if RANK in (-1, 0):
+if RANK in {-1, 0}:
# mAP
callbacks.run('on_train_epoch_end', epoch=epoch)
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
# end epoch ----------------------------------------------------------------------------------------------------
# end training -----------------------------------------------------------------------------------------------------
-if RANK in (-1, 0):
+if RANK in {-1, 0}:
LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
for f in last, best:
if f.exists():
def main(opt, callbacks=Callbacks()):
# Checks
-if RANK in (-1, 0):
+if RANK in {-1, 0}:
print_args(vars(opt))
check_git_status()
check_requirements(exclude=['thop'])
s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \
f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \
f'past_thr={x[x > thr].mean():.3f}-mean: '
-for i, x in enumerate(k):
+for x in k:
s += '%i,%i, ' % (round(x[0]), round(x[1]))
if verbose:
LOGGER.info(s[:-2])
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
-if rotation == 6: # rotation 270
-s = (s[1], s[0])
-elif rotation == 8: # rotation 90
+if rotation in [6, 8]: # rotation 270 or 90
s = (s[1], s[0])
except Exception:
pass
return len(self.batch_sampler.sampler)
def __iter__(self):
-for i in range(len(self)):
+for _ in range(len(self)):
yield next(self.iterator)
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
-else:
-path = self.files[self.count]
-self.new_video(path)
-ret_val, img0 = self.cap.read()
+path = self.files[self.count]
+self.new_video(path)
+ret_val, img0 = self.cap.read()
self.frame += 1
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
def img2label_paths(img_paths):
# Define label paths as a function of image paths
-sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
+sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
-if exists and LOCAL_RANK in (-1, 0):
+if exists and LOCAL_RANK in {-1, 0}:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt"
tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results
if cache['msgs']:
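For reference, the `img2label_paths` helper touched above (the change is only to f-strings) maps image paths to label paths by swapping the last `/images/` component for `/labels/` and the file suffix for `.txt`. A quick check with a made-up COCO-style path, assuming POSIX separators:

```python
import os

def img2label_paths(img_paths):
    # same logic as the refactored utils/datasets.py helper
    sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}'  # /images/, /labels/ substrings
    return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]

print(img2label_paths(['datasets/coco128/images/train2017/000000000009.jpg']))
# ['datasets/coco128/labels/train2017/000000000009.txt'] on a POSIX filesystem
```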
for h in logging.root.handlers:
logging.root.removeHandler(h) # remove all handlers associated with the root logger object
rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
-level = logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING
+level = logging.INFO if verbose and rank in {-1, 0} else logging.WARNING
log = logging.getLogger(name)
log.setLevel(level)
handler = logging.StreamHandler()
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
-if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:
+if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}:
try:
wandb_login_success = wandb.login(timeout=30)
except wandb.errors.UsageError: # known non-TTY terminal issue
if not self.opt.evolve:
wandb.log_artifact(str(best if best.exists() else last),
type='model',
-name='run_' + self.wandb.wandb_run.id + '_model',
+name=f'run_{self.wandb.wandb_run.id}_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
i = pred_cls == c
n_l = nt[ci] # number of labels
n_p = i.sum() # number of predictions
if n_p == 0 or n_l == 0:
continue
-else:
-# Accumulate FPs and TPs
-fpc = (1 - tp[i]).cumsum(0)
-tpc = tp[i].cumsum(0)
-# Recall
-recall = tpc / (n_l + eps) # recall curve
-r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
+# Accumulate FPs and TPs
+fpc = (1 - tp[i]).cumsum(0)
+tpc = tp[i].cumsum(0)
+# Recall
+recall = tpc / (n_l + eps) # recall curve
+r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
-# Precision
-precision = tpc / (tpc + fpc) # precision curve
-p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
+# Precision
+precision = tpc / (tpc + fpc) # precision curve
+p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
-# AP from recall-precision curve
-for j in range(tp.shape[1]):
-ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
-if plot and j == 0:
-py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
+# AP from recall-precision curve
+for j in range(tp.shape[1]):
+ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
+if plot and j == 0:
+py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
# Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + eps)
names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data
-names = {i: v for i, v in enumerate(names)} # to dict
+names = dict(enumerate(names)) # to dict
if plot:
plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
# Plots ----------------------------------------------------------------------------------------------------------------
-def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
+def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
# Precision-recall curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
py = np.stack(py, axis=1)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
-fig.savefig(Path(save_dir), dpi=250)
+fig.savefig(save_dir, dpi=250)
plt.close()
-def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
+def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
# Metric-confidence curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
-fig.savefig(Path(save_dir), dpi=250)
+fig.savefig(save_dir, dpi=250)
plt.close()
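Two of the metrics changes rely on small equivalences worth spelling out: `dict(enumerate(x))` builds the same index-to-value mapping as the comprehension it replaces, and once the `save_dir` default is a `Path`, re-wrapping it before `savefig` is redundant because Matplotlib accepts any path-like object. A throwaway check:

```python
from pathlib import Path

names = ['person', 'bicycle']  # hypothetical class names
assert dict(enumerate(names)) == {i: v for i, v in enumerate(names)} == {0: 'person', 1: 'bicycle'}

save_dir = Path('pr_curve.png')
assert Path(save_dir) == save_dir  # wrapping an existing Path is a no-op, so fig.savefig(save_dir) suffices
```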
def select_device(device='', batch_size=0, newline=True):
-# device = 'cpu' or '0' or '0,1,2,3'
+# device = None or 'cpu' or 0 or '0' or '0,1,2,3'
s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '
-device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'
+device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'
cpu = device == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
# profile(input, [m1, m2], n=100) # profile over 100 iterations
results = []
-device = device or select_device()
+if not isinstance(device, torch.device):
+device = select_device(device)
print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
f"{'input':>24s}{'output':>24s}")
tf += (t[1] - t[0]) * 1000 / n # ms per op forward
tb += (t[2] - t[1]) * 1000 / n # ms per op backward
mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)
-s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
-s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
-p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
+s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes
+p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters
print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
results.append([p, flops, mem, tf, tb, s_in, s_out])
except Exception as e:
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs
-except (ImportError, Exception):
+except Exception:
fs = ''
name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'
# Scales img(bs,3,y,x) by ratio constrained to gs-multiple
if ratio == 1.0:
return img
-else:
-h, w = img.shape[2:]
-s = (int(h * ratio), int(w * ratio)) # new size
-img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
-if not same_shape: # pad/crop img
-h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
-return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
+h, w = img.shape[2:]
+s = (int(h * ratio), int(w * ratio)) # new size
+img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
+if not same_shape: # pad/crop img
+h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
+return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
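One torch_utils.py change is more than cosmetic: in `profile()`, the old `device = device or select_device()` falls back to the default whenever the argument is falsy (`None`, `''`, or the GPU index `0` passed as an int) and otherwise leaves plain strings un-normalized, whereas the new `isinstance` check routes every non-`torch.device` value through `select_device(device)`, which now also tolerates `None` thanks to the added `.replace('none', '')`. That looks like the motivation for the change; a rough sketch of the new behaviour, with `select_device` approximated for a CPU-only machine:

```python
import torch

def _select_device(device=''):
    # simplified stand-in for utils.torch_utils.select_device
    d = str(device).strip().lower().replace('cuda:', '').replace('none', '')  # mirrors the edited line
    return torch.device('cpu' if d in {'cpu', ''} else f'cuda:{d}')

def normalize(device=None):
    if not isinstance(device, torch.device):  # as in the refactored profile()
        device = _select_device(device)
    return device

print(normalize())                     # cpu  -- None no longer leaks the string 'none'
print(normalize('cpu'))                # cpu
print(normalize(torch.device('cpu')))  # cpu  -- already a torch.device, passed through
```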