def testOptions(self):
    """Checks delegate bookkeeping counters across two load_delegate calls.

    The second delegate is loaded with options, which must be reflected in
    the library's options counter; deleting both delegates must bump the
    destroyed counter accordingly.
    """

    def check_counters(lib, created, destroyed, invoked, options_counter):
        # One place for the four bookkeeping assertions repeated below.
        self.assertEqual(lib.get_num_delegates_created(), created)
        self.assertEqual(lib.get_num_delegates_destroyed(), destroyed)
        self.assertEqual(lib.get_num_delegates_invoked(), invoked)
        self.assertEqual(lib.get_options_counter(), options_counter)

    delegate_a = interpreter_wrapper.load_delegate(self._delegate_file)
    check_counters(delegate_a._library, 1, 0, 0, 0)

    delegate_b = interpreter_wrapper.load_delegate(
        self._delegate_file, options={
            'unused': False,
            'options_counter': 2
        })
    lib = delegate_b._library
    check_counters(lib, 2, 0, 0, 2)

    del delegate_a
    del delegate_b
    check_counters(lib, 2, 2, 0, 2)
def testOptions(self):
    """Checks delegate creation/destruction/options counters (skipped on mac).

    Mirrors the counter expectations for two delegates, the second of which
    is loaded with an options dict.
    """
    # TODO(b/137299813): Enable when we fix for mac
    if sys.platform == 'darwin':
        return

    def check_counters(lib, created, destroyed, invoked, options_counter):
        # Shared assertion bundle for the delegate library's counters.
        self.assertEqual(lib.get_num_delegates_created(), created)
        self.assertEqual(lib.get_num_delegates_destroyed(), destroyed)
        self.assertEqual(lib.get_num_delegates_invoked(), invoked)
        self.assertEqual(lib.get_options_counter(), options_counter)

    delegate_a = interpreter_wrapper.load_delegate(self._delegate_file)
    check_counters(delegate_a._library, 1, 0, 0, 0)

    delegate_b = interpreter_wrapper.load_delegate(
        self._delegate_file, options={
            'unused': False,
            'options_counter': 2
        })
    lib = delegate_b._library
    check_counters(lib, 2, 0, 0, 2)

    del delegate_a
    del delegate_b
    check_counters(lib, 2, 2, 0, 2)
def testFail(self):
    """Loading the delegate with a 'fail' option must raise ValueError."""
    # TODO(b/137299813): Enable when we fix for mac
    if sys.platform == 'darwin':
        return
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # the supported spelling is assertRaisesRegex.
    with self.assertRaisesRegex(
        ValueError, 'Failed to load delegate from .*\nFail argument sent.'):
        interpreter_wrapper.load_delegate(
            self._delegate_file, options={'fail': 'fail'})
def testFail(self):
    """Loading the delegate with a 'fail' option must raise ValueError."""
    # Due to exception chaining in PY3, we can't be more specific here and
    # check that the phrase 'Fail argument sent' is present.
    expected_pattern = 'Failed to load delegate from'
    with self.assertRaisesRegex(ValueError, expected_pattern):
        interpreter_wrapper.load_delegate(
            self._delegate_file, options={'fail': 'fail'})
def testDestructionOrder(self):
    """Make sure internal _interpreter object is destroyed before delegate."""
    # NOTE: skipTest raises immediately, so everything below is currently
    # unreachable until the flakiness bug is resolved.
    self.skipTest('TODO(b/142136355): fix flakiness and re-enable')
    # Track which order destructions were doned in
    # TODO(b/137299813): Enable when we fix for mac
    if sys.platform == 'darwin':
        return
    destructions = []

    def register_destruction(x):
        # The ctypes c_char_p callback delivers bytes under PY3, while the
        # local __del__ hook passes a str; normalize to str for the final
        # comparison.
        destructions.append(x if isinstance(x, str) else x.decode('utf-8'))
        return 0

    # Make a wrapper for the callback so we can send this to ctypes
    delegate = interpreter_wrapper.load_delegate(self._delegate_file)
    # Make an interpreter with the delegate
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'),
        experimental_delegates=[delegate])

    class InterpreterDestroyCallback(object):
        # Sentinel attached to the interpreter: its __del__ records when
        # the interpreter object itself is torn down.

        def __del__(self):
            register_destruction('interpreter')

    interpreter._interpreter.stuff = InterpreterDestroyCallback()
    # Destroy both delegate and interpreter
    library = delegate._library
    prototype = ctypes.CFUNCTYPE(ctypes.c_int, (ctypes.c_char_p))
    library.set_destroy_callback(prototype(register_destruction))
    del delegate
    del interpreter
    # Detach the callback so later library activity cannot append entries.
    library.set_destroy_callback(None)
    # check the interpreter was destroyed before the delegate
    self.assertEqual(destructions, ['interpreter', 'test_delegate'])
def load(self, model_path, inputs=None, outputs=None):
    """Builds the TFLite interpreter for `model_path` and records tensor maps.

    When batching is enabled, the input tensor is resized first; the NCORE
    delegate (path taken from the NCORE_DELEGATE env var) is attached
    afterwards, then tensors are allocated.

    Returns:
      self, for call chaining.
    """
    self.sess = interpreter_wrapper.Interpreter(model_path=model_path)
    if self.do_batches:
        first_input = self.sess.get_input_details()[0]
        self.sess.resize_tensor_input(first_input['index'],
                                      (self.batch_size, 224, 224, 3))
    # We have to load the delegate after resizing the input tensor for batches
    if self.do_delegate:
        delegate_path = os.getenv("NCORE_DELEGATE")
        print('Loading delegate... ' + delegate_path)
        delegate = interpreter_wrapper.load_delegate(delegate_path)
        self.sess.add_delegates(experimental_delegates=[delegate])
    self.sess.allocate_tensors()
    # keep input/output name to index mapping
    self.input2index = {}
    for detail in self.sess.get_input_details():
        self.input2index[detail["name"]] = detail["index"]
    self.output2index = {}
    for detail in self.sess.get_output_details():
        self.output2index[detail["name"]] = detail["index"]
    # keep input/output names
    self.inputs = list(self.input2index)
    self.outputs = list(self.output2index)
    return self
def testMultipleInterpreters(self):
    """One delegate shared by two interpreters is destroyed only once.

    The delegate library must report a single creation, one invocation per
    interpreter that applies it, and destruction only after the last
    interpreter holding it is deleted.
    """

    def expect(created, destroyed, invoked):
        # Closure over `lib`, bound before the first call below.
        self.assertEqual(lib.get_num_delegates_created(), created)
        self.assertEqual(lib.get_num_delegates_destroyed(), destroyed)
        self.assertEqual(lib.get_num_delegates_invoked(), invoked)

    delegate = interpreter_wrapper.load_delegate(self._delegate_file)
    lib = delegate._library
    expect(1, 0, 0)

    interpreter_a = interpreter_wrapper.Interpreter(
        model_path=self._model_file, experimental_delegates=[delegate])
    expect(1, 0, 1)

    interpreter_b = interpreter_wrapper.Interpreter(
        model_path=self._model_file, experimental_delegates=[delegate])
    expect(1, 0, 2)

    del delegate
    del interpreter_a
    expect(1, 0, 2)

    del interpreter_b
    expect(1, 1, 2)
def testDestructionOrder(self):
    """Make sure internal _interpreter object is destroyed before delegate."""
    # Track which order destructions were done in
    destructions = []

    def register_destruction(x):
        # The ctypes c_char_p callback delivers its argument as bytes under
        # Python 3, while the local __del__ hook below passes a str.
        # Normalize to str so the final comparison against string literals
        # holds in both cases (matches the other variant of this test).
        destructions.append(x if isinstance(x, str) else x.decode('utf-8'))
        return 0

    # Make a wrapper for the callback so we can send this to ctypes
    delegate = interpreter_wrapper.load_delegate(self._delegate_file)
    prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
    destroy_callback = prototype(register_destruction)
    delegate._library.set_destroy_callback(destroy_callback)
    # Make an interpreter with the delegate
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'),
        experimental_delegates=[delegate])

    class InterpreterDestroyCallback(object):
        # Sentinel attached to the interpreter whose __del__ records when
        # the interpreter object is torn down.

        def __del__(self):
            register_destruction('interpreter')

    interpreter._interpreter.stuff = InterpreterDestroyCallback()
    # Destroy both delegate and interpreter
    del delegate
    del interpreter
    # check the interpreter was destroyed before the delegate
    self.assertEqual(destructions, ['interpreter', 'test_delegate'])
def _TestInterpreter(self, model_path, options=None):
    """Test wrapper function that creates an interpreter with the delegate.

    Returns None on macOS, where the delegate test is disabled.
    """
    # TODO(b/137299813): Enable when we fix for mac
    if sys.platform == 'darwin':
        return
    return interpreter_wrapper.Interpreter(
        model_path=model_path,
        experimental_delegates=[
            interpreter_wrapper.load_delegate(self._delegate_file, options)
        ])
def model_eval(model_name, test_set):
    """Runs `model_name` over `test_set` on the Edge TPU delegate.

    Collects one prediction tensor per (img, label) pair and stores the
    list in the global `model_pool_prediction` dict under `model_name`.
    Labels are not used here; only inference outputs are recorded.
    """
    global model_pool_prediction
    interpreter_quant = tf.lite.Interpreter(
        model_name,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    interpreter_quant.allocate_tensors()
    input_index = interpreter_quant.get_input_details()[0]["index"]
    output_index = interpreter_quant.get_output_details()[0]["index"]
    print(model_name)
    model_prediction = []
    for image_number, (img, label) in enumerate(test_set):
        print("image", image_number)
        interpreter_quant.set_tensor(input_index, img)
        interpreter_quant.invoke()
        model_prediction.append(interpreter_quant.get_tensor(output_index))
    model_pool_prediction[model_name] = model_prediction
def load(self, model_path, inputs=None, outputs=None):
    """Builds a TFLite interpreter with the NCORE delegate attached.

    The delegate shared-library path comes from the NCORE_DELEGATE
    environment variable. Also records input/output tensor name-to-index
    mappings and the corresponding name lists.

    Returns:
      self, for call chaining.
    """
    delegate_path = os.getenv("NCORE_DELEGATE")
    print('Loading delegate... ' + delegate_path)
    self.sess = interpreter_wrapper.Interpreter(
        model_path=model_path,
        experimental_delegates=[
            interpreter_wrapper.load_delegate(delegate_path)
        ])
    self.sess.allocate_tensors()
    # keep input/output name to index mapping
    self.input2index = {
        detail["name"]: detail["index"]
        for detail in self.sess.get_input_details()
    }
    self.output2index = {
        detail["name"]: detail["index"]
        for detail in self.sess.get_output_details()
    }
    # keep input/output names
    self.inputs = list(self.input2index)
    self.outputs = list(self.output2index)
    return self
def testMultipleInterpreters(self):
    """Shared delegate survives until its last interpreter is deleted.

    Skipped on macOS. The library must report one creation, one invocation
    per interpreter, and a single destruction only after both interpreters
    are gone.
    """
    # TODO(b/137299813): Enable when we fix for mac
    if sys.platform == 'darwin':
        return

    def expect(created, destroyed, invoked):
        # Closure over `lib`, bound before the first call below.
        self.assertEqual(lib.get_num_delegates_created(), created)
        self.assertEqual(lib.get_num_delegates_destroyed(), destroyed)
        self.assertEqual(lib.get_num_delegates_invoked(), invoked)

    delegate = interpreter_wrapper.load_delegate(self._delegate_file)
    lib = delegate._library
    expect(1, 0, 0)

    interpreter_a = interpreter_wrapper.Interpreter(
        model_path=self._model_file, experimental_delegates=[delegate])
    expect(1, 0, 1)

    interpreter_b = interpreter_wrapper.Interpreter(
        model_path=self._model_file, experimental_delegates=[delegate])
    expect(1, 0, 2)

    del delegate
    del interpreter_a
    expect(1, 0, 2)

    del interpreter_b
    expect(1, 1, 2)
def _TestInterpreter(self, model_path, options=None):
    """Test wrapper function that creates an interpreter with the delegate."""
    return interpreter_wrapper.Interpreter(
        model_path=model_path,
        experimental_delegates=[
            interpreter_wrapper.load_delegate(self._delegate_file, options)
        ])
def testFail(self):
    """load_delegate must raise ValueError when the delegate reports failure."""
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # the supported spelling is assertRaisesRegex.
    with self.assertRaisesRegex(ValueError,
                                'Failed to load delegate from .*'):
        interpreter_wrapper.load_delegate(
            self._delegate_file, options={'fail': 'fail'})
from tensorflow.lite.python.interpreter import Interpreter, load_delegate # curl https://dl.google.com/coral/canned_models/mobilenet_v1_1.0_224_quant_edgetpu.tflite --output mobilenet.tflite # wget https://github.com/chengyehwang/termux_bootstrap/raw/main/libtensorflowlite_gpu_delegate.so model = 'mobilenet.tflite' interpreter = Interpreter(model) interpreter = Interpreter( model, experimental_delegates=[ load_delegate('./libtensorflowlite_gpu_delegate.so') ])
# there is a runtime tf-lite interpreter and a python3 tf-lite interpreter, we wil include both in case # the runtime-tflite is (to my knowledge) faster # depending on if we are using the TPU or not, we will need to issue different commands to start # the interpreter tf_pkg = importlib.util.find_spec('tflite_runtime') if use_TPU: if tf_pkg is None: from tensorflow.lite.python.interpreter import Interpreter from tensorflow.lite.python.interpreter import load_delegate else: from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import load_delegate interpreter = Interpreter( model_path=tf_model.graph, experimental_delegates=[load_delegate('libedgetpu.so.1.0')]) else: if tf_pkg is None: from tensorflow.lite.python.interpreter import Interpreter else: from tflite_runtime.interpreter import Interpreter interpreter = Interpreter(model_path=tf_model.graph) interpreter.allocate_tensors() # Get model details input_tensor_details = interpreter.get_input_details() output_tensor_details = interpreter.get_output_details() height = input_tensor_details[0]['shape'][1] width = input_tensor_details[0]['shape'][2]