|
|
@@ -25,7 +25,8 @@ from models.experimental import * |
|
|
|
from utils.autoanchor import check_anchor_order |
|
|
|
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args |
|
|
|
from utils.plots import feature_visualization |
|
|
|
from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync |
|
|
|
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, |
|
|
|
time_sync) |
|
|
|
|
|
|
|
try: |
|
|
|
import thop # for FLOPs computation |
|
|
# Developer entry point: build a Model from a YAML config, then optionally
# profile it (whole-model or layer-by-layer) or smoke-test every yolo*.yaml.
# NOTE(review): this span was a corrupted merge carrying both the pre- and
# post-patch code interleaved (duplicate profiling blocks, an empty
# `if opt.test:` suite); resolved here to the post-patch form.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--profile', action='store_true', help='profile model speed')
    parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
    parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
    opt = parser.parse_args()
    opt.cfg = check_yaml(opt.cfg)  # check YAML
    print_args(vars(opt))  # print_args is imported above; log parsed options
    device = select_device(opt.device)

    # Create model
    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)  # dummy 640x640 RGB input batch
    model = Model(opt.cfg).to(device)

    # Options (mutually exclusive; first match wins)
    if opt.line_profile:  # profile layer by layer
        _ = model(im, profile=True)

    elif opt.profile:  # profile forward-backward
        results = profile(input=im, ops=[model], n=3)

    elif opt.test:  # test all models
        for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
            try:
                _ = Model(cfg)
            except Exception as e:
                # best-effort sweep: report the failing config and continue
                print(f'Error in {cfg}: {e}')

    # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
    # from torch.utils.tensorboard import SummaryWriter
    # tb_writer = SummaryWriter('.')
    # LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
    # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), [])  # add model graph