
model fusion and onnx export

Glenn Jocher, 4 years ago
parent · commit 12b0c046d5
3 changed files with 20 additions and 7 deletions
  1. models/common.py (+3, -0)
  2. models/onnx_export.py (+8, -7)
  3. models/yolo.py (+9, -0)

models/common.py (+3, -0)

@@ -20,6 +20,9 @@ class Conv(nn.Module): # standard convolution
     def forward(self, x):
         return self.act(self.bn(self.conv(x)))
 
+    def fuseforward(self, x):
+        return self.act(self.conv(x))
+
 
 class Bottleneck(nn.Module):
     def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
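
The new fuseforward() skips the BatchNorm2d call because the BN statistics are expected to have been folded into the convolution weights beforehand (see the fuse() method added to models/yolo.py below). The folding helper torch_utils.fuse_conv_and_bn is not part of this diff; a minimal sketch of what such a helper could look like, with the name fold_conv_bn and all details being assumptions:

# Hypothetical sketch of Conv2d + BatchNorm2d folding; the real helper is
# torch_utils.fuse_conv_and_bn, which is not shown in this commit.
import torch
import torch.nn as nn

def fold_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size,
                      stride=conv.stride, padding=conv.padding, groups=conv.groups, bias=True)

    # scale the conv weights by gamma / sqrt(running_var + eps)
    w = conv.weight.clone().view(conv.out_channels, -1)
    scale = torch.diag(bn.weight / torch.sqrt(bn.running_var + bn.eps))
    fused.weight.data.copy_(torch.mm(scale, w).view(fused.weight.shape))

    # bias' = gamma * (bias - running_mean) / sqrt(running_var + eps) + beta
    b = conv.bias if conv.bias is not None else torch.zeros(conv.out_channels)
    fused.bias.data.copy_(bn.weight * (b - bn.running_mean) / torch.sqrt(bn.running_var + bn.eps) + bn.bias)
    return fused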

models/onnx_export.py (+8, -7)

@@ -1,6 +1,6 @@
-# Exports a pytorch *.pt model to *.onnx format. Example usage (run from ./yolov5 directory):
-# $ export PYTHONPATH="$PWD"
-# $ python models/onnx_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+# Exports a pytorch *.pt model to *.onnx format
+# Example usage (run from ./yolov5 directory):
+# $ export PYTHONPATH="$PWD" && python models/onnx_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
 
 import argparse
 
@@ -10,10 +10,11 @@ from models.common import *

 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', default='./weights/yolov5s.pt', help='weights path')
-    parser.add_argument('--img-size', default=640, help='inference size (pixels)')
-    parser.add_argument('--batch-size', default=1, help='batch size')
+    parser.add_argument('--weights', type=str, default='./weights/yolov5s.pt', help='weights path')
+    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
     opt = parser.parse_args()
+    print(opt)
 
     # Parameters
     f = opt.weights.replace('.pt', '.onnx')  # onnx filename
@@ -23,7 +24,7 @@ if __name__ == '__main__':
     google_utils.attempt_download(opt.weights)
     model = torch.load(opt.weights)['model']
     model.eval()
-    # model.fuse()  # optionally fuse Conv2d + BatchNorm2d layers TODO
+    # model.fuse()
 
     # Export to onnx
     model.model[-1].export = True  # set Detect() layer export=True
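
The torch.onnx.export call itself sits below this hunk and is untouched by the commit. As a rough sketch only, continuing from the opt, model and f variables above, the export and a basic graph check might look like this (the opset version and tensor names are assumptions, not taken from the diff):

# Assumed continuation of models/onnx_export.py; opset version and tensor
# names are illustrative, not taken from this commit.
import onnx
import torch

img = torch.zeros((opt.batch_size, 3, opt.img_size, opt.img_size))  # dummy input
torch.onnx.export(model, img, f, verbose=False, opset_version=11,
                  input_names=['images'], output_names=['output'])

# sanity-check the exported graph
onnx.checker.check_model(onnx.load(f))
print('ONNX export success, saved as %s' % f)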

models/yolo.py (+9, -0)

@@ -123,6 +123,15 @@ class Model(nn.Module):
             b = self.model[f].bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
             print(('%g Conv2d.bias:' + '%10.3g' * 6) % (f, *b[:5].mean(1).tolist(), b[5:].mean()))
 
+    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
+        print('Fusing layers...')
+        for m in self.model.modules():
+            if type(m) is Conv:
+                m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                m.bn = None  # remove batchnorm
+                m.forward = m.fuseforward  # update forward
+        torch_utils.model_info(self)
+
 
 def parse_model(md, ch):  # model_dict, input_channels(3)
     print('\n%3s%15s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
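
A quick way to sanity-check the new fuse() method is to compare model outputs before and after fusion. The sketch below is illustrative only; the config path and output unpacking are assumptions about the surrounding repo, not part of this commit:

# Hypothetical smoke test for Model.fuse(); cfg path and output unpacking are assumptions.
import torch
from models.yolo import Model

model = Model('models/yolov5s.yaml').eval()
x = torch.rand(1, 3, 640, 640)
with torch.no_grad():
    y_ref = model(x)[0]      # inference output before fusion
    model.fuse()             # folds BatchNorm2d into Conv2d, swaps forward for fuseforward
    y_fused = model(x)[0]    # same forward pass, now without BatchNorm2d
print((y_ref - y_fused).abs().max())  # expect only a tiny numerical difference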
