Prefer `tflite_runtime` for TFLite inference if installed (#6406)
* import tflite_runtime if tensorflow not installed
* rename tflite to tfli
* Attempt tflite_runtime for all TFLite workflows. Also rename tfli to tfl.

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent ed9bac8392
commit 16563ac5b5
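The gist of the change as a standalone sketch: try the small tflite_runtime wheel first and fall back to the full TensorFlow package. The explicit Interpreter/load_delegate bindings and the tf.lite.experimental.load_delegate fallback path below are illustrative assumptions on my part; the actual diff (next) instead aliases the whole module as tfl.

    # Sketch only: prefer the lightweight tflite_runtime wheel if installed,
    # otherwise fall back to the full TensorFlow package.
    try:
        from tflite_runtime.interpreter import Interpreter, load_delegate
    except ImportError:
        import tensorflow as tf
        Interpreter = tf.lite.Interpreter
        load_delegate = tf.lite.experimental.load_delegate  # experimental path in full TF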
@@ -374,17 +374,19 @@ class DetectMultiBackend(nn.Module):
             graph_def.ParseFromString(open(w, 'rb').read())
             frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0")
         elif tflite:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
-            if 'edgetpu' in w.lower():  # Edge TPU
+            try:
+                import tflite_runtime.interpreter as tfl  # prefer tflite_runtime if installed
+            except ImportError:
+                import tensorflow.lite as tfl
+            if 'edgetpu' in w.lower():  # Edge TPU https://coral.ai/software/#edgetpu-runtime
                 LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
-                import tflite_runtime.interpreter as tfli  # install https://coral.ai/software/#edgetpu-runtime
                 delegate = {'Linux': 'libedgetpu.so.1',
                             'Darwin': 'libedgetpu.1.dylib',
                             'Windows': 'edgetpu.dll'}[platform.system()]
-                interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])
+                interpreter = tfl.Interpreter(model_path=w, experimental_delegates=[tfl.load_delegate(delegate)])
             else:  # Lite
                 LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
-                import tensorflow as tf
-                interpreter = tf.lite.Interpreter(model_path=w)  # load TFLite model
+                interpreter = tfl.Interpreter(model_path=w)  # load TFLite model
             interpreter.allocate_tensors()  # allocate
             input_details = interpreter.get_input_details()  # inputs
             output_details = interpreter.get_output_details()  # outputs
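For context, a minimal end-to-end inference sketch using the interpreter the diff sets up. The model path and the zero-filled dummy input are placeholders, not part of the commit:

    import numpy as np

    try:
        import tflite_runtime.interpreter as tfl  # prefer tflite_runtime if installed
    except ImportError:
        import tensorflow.lite as tfl

    interpreter = tfl.Interpreter(model_path='model.tflite')  # placeholder path
    interpreter.allocate_tensors()  # allocate
    input_details = interpreter.get_input_details()  # inputs
    output_details = interpreter.get_output_details()  # outputs

    # dummy input matching the model's declared shape and dtype
    x = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
    interpreter.set_tensor(input_details[0]['index'], x)
    interpreter.invoke()
    y = interpreter.get_tensor(output_details[0]['index'])  # model output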