@@ ... @@
         LOGGER.info(f'{prefix} export failure: {e}')


-def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
+def export_openvino(model, im, file, half, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
     try:
         check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
         import openvino.inference_engine as ie

         LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
         f = str(file).replace('.pt', '_openvino_model' + os.sep)

-        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
+        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
         subprocess.check_output(cmd, shell=True)

         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
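For reference, a minimal standalone sketch of what the updated export_openvino() does with the new half flag: the PyTorch/ONNX graph stays FP32, and OpenVINO's Model Optimizer (mo, from the openvino-dev package) performs the FP16 conversion through --data_type. The helper name, the yolov5s.pt path, and the assumption that a matching .onnx file already exists are illustrative only, not part of the patched code.

import os
import subprocess
from pathlib import Path

def onnx_to_openvino(weights='yolov5s.pt', half=True):  # hypothetical helper mirroring the diff above
    file = Path(weights)
    f = str(file).replace('.pt', '_openvino_model' + os.sep)  # output directory for the IR (.xml/.bin)
    # FP16 vs FP32 is selected purely via the Model Optimizer flag, not by casting the model in PyTorch
    cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
    subprocess.check_output(cmd, shell=True)
    return f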
@@ ... @@
     # Load PyTorch model
     device = select_device(device)
     if half:
-        assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
+        assert device.type != 'cpu' or coreml or xml, '--half only compatible with GPU export, i.e. use --device 0'
     model = attempt_load(weights, map_location=device, inplace=True, fuse=True)  # load FP32 model
     nc, names = model.nc, model.names  # number of classes, class names
@@ ... @@
     im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection

     # Update model
-    if half and not coreml:
+    if half and not (coreml or xml):
         im, model = im.half(), model.half()  # to FP16
     model.train() if train else model.eval()  # training mode = no Detect() layer grid construction
     for k, m in model.named_modules():
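The two run()-side edits above implement a single decision: when the target back-end converts to FP16 itself (CoreML via its converter, OpenVINO via mo --data_type FP16), the PyTorch model and dummy input are left in FP32 and --half no longer requires a GPU. A small illustrative sketch of that gating, with example values that are not part of the patched script:

# Example values only; in the export script these come from --half, --include and --device.
half, coreml, xml = True, False, True
device_is_cpu = True

# Relaxed check: CPU + --half is allowed when a converter handles the FP16 cast downstream.
assert not device_is_cpu or coreml or xml, '--half only compatible with GPU export, i.e. use --device 0'

if half and not (coreml or xml):
    plan = 'cast PyTorch model/input to FP16 with .half() before export'
else:
    plan = 'keep FP32 graph; CoreML/OpenVINO converter produces the FP16 artifact'
print(plan)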
@@ ... @@
     if onnx or xml:  # OpenVINO requires ONNX
         f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
     if xml:  # OpenVINO
-        f[3] = export_openvino(model, im, file)
+        f[3] = export_openvino(model, im, file, half)
     if coreml:
         _, f[4] = export_coreml(model, im, file, int8, half)
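Taken together, the change lets an FP16 OpenVINO IR be produced on a CPU-only machine. A hypothetical end-to-end invocation, assuming the export script's existing --weights/--include/--half options and driven from Python so the snippet stays self-contained:

import subprocess

# Equivalent to: python export.py --weights yolov5s.pt --include openvino --half
# ONNX is exported first automatically ("OpenVINO requires ONNX"), then mo writes an
# FP16 IR into yolov5s_openvino_model/.
subprocess.run(
    ['python', 'export.py', '--weights', 'yolov5s.pt', '--include', 'openvino', '--half'],
    check=True)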