Browse Source

Prefer `tflite_runtime` for TFLite inference if installed (#6406)

* import tflite_runtime if tensorflow not installed

* rename tflite to tfli

* Attempt tflite_runtime for all TFLite workflows

Also rename tfli to tfl

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
modifyDataloader
Motoki Kimura GitHub 2 years ago
parent
commit
16563ac5b5
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 7 additions and 5 deletions
  1. +7
    -5
      models/common.py

+ 7
- 5
models/common.py View File

graph_def.ParseFromString(open(w, 'rb').read()) graph_def.ParseFromString(open(w, 'rb').read())
frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0")
elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
if 'edgetpu' in w.lower(): # Edge TPU
try:
import tflite_runtime.interpreter as tfl # prefer tflite_runtime if installed
except ImportError:
import tensorflow.lite as tfl
if 'edgetpu' in w.lower(): # Edge TPU https://coral.ai/software/#edgetpu-runtime
LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
import tflite_runtime.interpreter as tfli # install https://coral.ai/software/#edgetpu-runtime
delegate = {'Linux': 'libedgetpu.so.1', delegate = {'Linux': 'libedgetpu.so.1',
'Darwin': 'libedgetpu.1.dylib', 'Darwin': 'libedgetpu.1.dylib',
'Windows': 'edgetpu.dll'}[platform.system()] 'Windows': 'edgetpu.dll'}[platform.system()]
interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])
interpreter = tfl.Interpreter(model_path=w, experimental_delegates=[tfl.load_delegate(delegate)])
else: # Lite else: # Lite
LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model
interpreter = tfl.Interpreter(model_path=w) # load TFLite model
interpreter.allocate_tensors() # allocate interpreter.allocate_tensors() # allocate
input_details = interpreter.get_input_details() # inputs input_details = interpreter.get_input_details() # inputs
output_details = interpreter.get_output_details() # outputs output_details = interpreter.get_output_details() # outputs

Loading…
Cancel
Save