
onnx_export.py

5.0
Glenn Jocher · 4 years ago
commit df7988d8d0
4 changed files with 37 additions and 19 deletions

  1. .github/workflows/greetings.yml (+1, -1)
  2. README.md (+1, -1)
  3. detect.py (+3, -17)
  4. models/onnx_export.py (+32, -0)

.github/workflows/greetings.yml (+1, -1)

@@ -10,7 +10,7 @@ jobs:
      with:
        repo-token: ${{ secrets.GITHUB_TOKEN }}
        pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.'
-        issue-message: >
+        issue-message: |
          Hello @${{ github.actor }}, thank you for your interest in our work! Please visit our [Custom Training Tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) to get started, and see our [Google Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/tutorial.ipynb), [Docker Image](https://hub.docker.com/r/ultralytics/yolov5), and [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) for example environments.
          If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you.
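
The change from a folded block scalar (>) to a literal block scalar (|) means the multi-line issue message keeps its line breaks instead of having them folded into spaces, so the tutorial links and the bug-report note render on separate lines. A minimal PyYAML sketch, not part of the commit and using a made-up msg key, to illustrate the difference:

# Illustrative only: YAML folded ('>') vs literal ('|') block scalars.
import yaml

folded = yaml.safe_load("msg: >\n  line one\n  line two\n")
literal = yaml.safe_load("msg: |\n  line one\n  line two\n")

print(repr(folded['msg']))   # 'line one line two\n'  -- newlines folded into spaces
print(repr(literal['msg']))  # 'line one\nline two\n' -- newlines preserved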

README.md (+1, -1)

@@ -108,4 +108,4 @@ To access an up-to-date working environment (with all dependencies including CUD

## Contact

-**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit us at https://www.ultralytics.com.
+**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com.

detect.py (+3, -17)

@@ -7,12 +7,12 @@ ONNX_EXPORT = False


def detect(save_img=False):
-    imgsz = (320, 192) if ONNX_EXPORT else opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)
-    out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
+    out, source, weights, half, view_img, save_txt, imgsz = \
+        opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt, opt.img_size
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')

    # Initialize
-    device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
+    device = torch_utils.select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
@@ -35,20 +35,6 @@ def detect(save_img=False):
    # Fuse Conv2d + BatchNorm2d layers
    # model.fuse()

-    # Export mode
-    if ONNX_EXPORT:
-        model.fuse()
-        img = torch.zeros((1, 3) + imgsz)  # (1, 3, 320, 192)
-        f = opt.weights.replace(opt.weights.split('.')[-1], 'onnx')  # *.onnx filename
-        torch.onnx.export(model, img, f, verbose=False, opset_version=11)
-
-        # Validate exported model
-        import onnx
-        model = onnx.load(f)  # Load the ONNX model
-        onnx.checker.check_model(model)  # Check that the IR is well formed
-        print(onnx.helper.printable_graph(model.graph))  # Print a human readable representation of the graph
-        return
-
    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:

models/onnx_export.py (+32, -0)

@@ -0,0 +1,32 @@
import argparse

import onnx

from models.common import *

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default='../weights/yolov5s.pt', help='model path RELATIVE to ./models/')
    parser.add_argument('--img-size', default=640, help='inference size (pixels)')
    parser.add_argument('--batch-size', default=1, help='batch size')
    opt = parser.parse_args()

    # Parameters
    f = opt.weights.replace('.pt', '.onnx')  # onnx filename
    img = torch.zeros((opt.batch_size, 3, opt.img_size, opt.img_size))  # image size, (1, 3, 320, 192) iDetection

    # Load pytorch model
    google_utils.attempt_download(opt.weights)
    model = torch.load(opt.weights)['model']
    model.eval()
    # model.fuse()  # optionally fuse Conv2d + BatchNorm2d layers TODO

    # Export to onnx
    model.model[-1].export = True  # set Detect() layer export=True
    torch.onnx.export(model, img, f, verbose=False, opset_version=11)

    # Check onnx model
    model = onnx.load(f)  # load onnx model
    onnx.checker.check_model(model)  # check onnx model
    print(onnx.helper.printable_graph(model.graph))  # print a human readable representation of the graph
    print('Export complete. ONNX model saved to %s\nView with https://github.com/lutzroeder/netron' % f)
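
For reference, a hedged usage sketch (not part of the commit): once models/onnx_export.py has written yolov5s.onnx, the file can be sanity-checked with onnxruntime. The weights path, input shape, and output interpretation below follow the script's defaults and are assumptions, not something the commit specifies.

# Illustrative only, not part of this commit: load the exported model with
# onnxruntime (assumed installed) and run a dummy forward pass.
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession('../weights/yolov5s.onnx')  # path assumed from the script defaults
input_name = session.get_inputs()[0].name  # query the graph rather than hard-coding the input name

x = np.zeros((1, 3, 640, 640), dtype=np.float32)  # (batch_size, 3, img_size, img_size) as exported
outputs = session.run(None, {input_name: x})  # raw Detect() outputs; shapes depend on the model
print([o.shape for o in outputs])

Netron, linked in the script's final print statement, is the other quick way to inspect the exported graph.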
