
Update export.py with v3.0 Hardswish() support

Glenn Jocher, 4 years ago
parent commit 4d7f222f73
2 changed files with 15 additions and 10 deletions
  1. models/export.py  (+12, -7)
  2. utils/activations.py  (+3, -3)

models/export.py  (+12, -7)



 import torch

+from models.common import Conv
+from models.experimental import attempt_load
+from utils.activations import Hardswish
 from utils.general import set_logging
-from utils.google_utils import attempt_download


 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
+    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')  # from yolov5/models/
     parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
     parser.add_argument('--batch-size', type=int, default=1, help='batch size')
     opt = parser.parse_args()
     img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection


     # Load PyTorch model
-    attempt_download(opt.weights)
-    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
-    model.eval()
-    model.fuse()
+    model = attempt_load(opt.weights, map_location=torch.device('cpu'))  # load FP32 model


     # Update model
+    for k, m in model.named_modules():
+        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
+        if isinstance(m, Conv):
+            m.act = Hardswish()  # assign activation
+        # if isinstance(m, Detect):
+        #     m.forward = m.forward_export  # assign forward (optional)
     model.model[-1].export = True  # set Detect() layer export=True
     y = model(img)  # dry run


         # Checks
         onnx_model = onnx.load(f)  # load onnx model
         onnx.checker.check_model(onnx_model)  # check onnx model
-        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
+        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
         print('ONNX export success, saved as %s' % f)
     except Exception as e:
         print('ONNX export failure: %s' % e)
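
Taken together, the export.py changes replace the manual checkpoint handling (attempt_download + torch.load + eval + fuse) with a single attempt_load() call and patch every Conv module so its activation is the export-friendly Hardswish before the dry run and export. A minimal sketch of the resulting flow, assuming a local yolov5s.pt checkpoint; the output filename, opset_version and tensor names below are illustrative, not part of this commit:

    import torch
    from models.common import Conv
    from models.experimental import attempt_load
    from utils.activations import Hardswish

    weights = 'yolov5s.pt'                    # hypothetical local checkpoint path
    img = torch.zeros((1, 3, 640, 640))       # dummy input used for tracing

    model = attempt_load(weights, map_location=torch.device('cpu'))  # load FP32 model

    # Update model: swap the activation in every Conv for the export-friendly Hardswish
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, Conv):
            m.act = Hardswish()                 # assign activation

    model.model[-1].export = True               # set Detect() layer export=True
    y = model(img)                              # dry run

    f = weights.replace('.pt', '.onnx')         # illustrative output name
    torch.onnx.export(model, img, f, verbose=False, opset_version=12,
                      input_names=['images'], output_names=['output'])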

utils/activations.py  (+3, -3)

         return x * torch.sigmoid(x)


-class Hardswish(nn.Module):  # alternative to nn.Hardswish() for export
+class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
     @staticmethod
     def forward(x):
-        # return x * F.hardsigmoid(x)
-        return x * F.hardtanh(x + 3, 0., 6.) / 6.
+        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
+        return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX


 class MemoryEfficientSwish(nn.Module):
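
The comments added in this hunk give the reason for the hardtanh form: x * F.hardtanh(x + 3, 0., 6.) / 6. exports cleanly to TorchScript, CoreML and ONNX, while the F.hardsigmoid variant only covers TorchScript and CoreML. Mathematically both expressions equal nn.Hardswish. A quick sanity check of that equivalence, assuming PyTorch >= 1.6 so nn.Hardswish is available (the tensor shape and tolerance are arbitrary):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
        @staticmethod
        def forward(x):
            return x * F.hardtanh(x + 3., 0., 6.) / 6.

    x = torch.randn(10000)
    # the hardtanh formulation should match the built-in Hardswish up to float rounding
    assert torch.allclose(Hardswish()(x), nn.Hardswish()(x), atol=1e-6)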
