Code Example #1
    def testMultipleInterpreters(self):
        delegate = interpreter_wrapper.load_delegate(self._delegate_file)
        lib = delegate._library

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 0)

        interpreter_a = interpreter_wrapper.Interpreter(
            model_path=self._model_file, experimental_delegates=[delegate])

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 1)

        interpreter_b = interpreter_wrapper.Interpreter(
            model_path=self._model_file, experimental_delegates=[delegate])

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 2)

        del delegate
        del interpreter_a

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 2)

        del interpreter_b

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 1)
        self.assertEqual(lib.get_num_delegates_invoked(), 2)
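The counters above come from a purpose-built test delegate library; ordinary code only needs the load_delegate/Interpreter pairing. A minimal sketch, assuming a hypothetical delegate library path:

# Minimal delegate usage sketch. The library path is an assumption;
# substitute the delegate shared library your platform provides.
from tensorflow.lite.python import interpreter as interpreter_wrapper

delegate = interpreter_wrapper.load_delegate('/path/to/test_delegate.so')
interpreter = interpreter_wrapper.Interpreter(
    model_path='model.tflite', experimental_delegates=[delegate])
interpreter.allocate_tensors()
# One delegate instance can back several interpreters; it is destroyed only
# after the last interpreter referencing it is deleted, as the test verifies.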
Code Example #2
File: debugger.py Project: leizton/tensorflow
    def _init_from_converter(self,
                             options: QuantizationDebugOptions,
                             converter: TFLiteConverter,
                             calibrated_model: Optional[bytes] = None,
                             float_model: Optional[bytes] = None) -> None:
        """Convert the model and apply options.

    Converts the quantized model and initializes a quantized model interpreter
    with the quantized model. Returns a float model interpreter if float model
    is provided.

    Args:
      options: a QuantizationDebugOptions object.
      converter: an initialized tf.lite.TFLiteConverter.
      calibrated_model: Calibrated model bytes.
      float_model: Float model bytes.
    """
        self.quant_model = convert.mlir_quantize(
            calibrated_model,
            disable_per_channel=converter._experimental_disable_per_channel,  # pylint: disable=protected-access
            fully_quantize=options.fully_quantize,
            enable_numeric_verify=True,
            denylisted_ops=options.denylisted_ops,
            denylisted_nodes=options.denylisted_nodes)
        self._quant_interpreter = _interpreter.Interpreter(
            model_content=self.quant_model)
        self._float_interpreter = None
        if float_model is not None:
            self._float_interpreter = _interpreter.Interpreter(
                model_content=float_model)
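_init_from_converter is an internal initializer; user code normally reaches it through the public tf.lite.experimental.QuantizationDebugger API. A hedged sketch, where representative_dataset is an assumed user-supplied generator yielding lists of input arrays:

# Sketch only: drive the debugger via the public API instead of calling
# _init_from_converter directly.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model('/path/to/saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset  # assumed generator

debugger = tf.lite.experimental.QuantizationDebugger(
    converter=converter, debug_dataset=representative_dataset)
debugger.run()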
Code Example #3
def load_eval_model(model_path):

    # support for TFLite model
    if model_path.endswith('.tflite'):
        from tensorflow.lite.python import interpreter as interpreter_wrapper
        model = interpreter_wrapper.Interpreter(model_path=model_path)
        model.allocate_tensors()
        model_format = 'TFLITE'

    # support for MNN model
    elif model_path.endswith('.mnn'):
        model = MNN.Interpreter(model_path)
        model_format = 'MNN'

    # support for TF 1.x frozen pb model
    elif model_path.endswith('.pb'):
        model = load_graph(model_path)
        model_format = 'PB'

    # normal keras h5 model
    elif model_path.endswith('.h5'):
        model = load_model(model_path, compile=False)
        model_format = 'H5'
        K.set_learning_phase(0)
    else:
        raise ValueError('invalid model file')

    return model, model_format
Code Example #4
File: test_tflite.py Project: MegEngine/mgeconvert
def _test_convert_result(inputs,
                         fpath,
                         mge_result,
                         max_err,
                         nhwc=True,
                         nhwc2=True,
                         disable_nhwc=False):
    if nhwc and inputs.ndim == 4:
        inputs = inputs.transpose((0, 2, 3, 1))

    mge_to_tflite(
        fpath + ".mge",
        output=tmp_file + ".tflite",
        mtk=False,
        disable_nhwc=disable_nhwc,
    )

    tfl_model = interpreter.Interpreter(model_path=tmp_file + ".tflite")
    tfl_model.allocate_tensors()

    input_details = tfl_model.get_input_details()
    tfl_model.set_tensor(input_details[0]["index"], inputs)
    tfl_model.invoke()
    pred_tfl = tfl_model.tensor(tfl_model.get_output_details()[0]["index"])()
    if nhwc2 and pred_tfl.ndim == 4:
        pred_tfl = pred_tfl.transpose((0, 3, 1, 2))
    assert pred_tfl.shape == mge_result.shape
    assert pred_tfl.dtype == mge_result.dtype
    np.testing.assert_allclose(pred_tfl, mge_result, atol=max_err)
    print("success!")
Code Example #5
def load_eval_model(model_path):
    # support for TFLite model
    if model_path.endswith('.tflite'):
        from tensorflow.lite.python import interpreter as interpreter_wrapper
        model = interpreter_wrapper.Interpreter(model_path=model_path)
        model.allocate_tensors()
        model_format = 'TFLITE'

    # support for MNN model
    elif model_path.endswith('.mnn'):
        model = MNN.Interpreter(model_path)
        model_format = 'MNN'

    # support for TF 1.x frozen pb model
    elif model_path.endswith('.pb'):
        model = load_graph(model_path)
        model_format = 'PB'

    # support for ONNX model
    elif model_path.endswith('.onnx'):
        model = onnxruntime.InferenceSession(model_path)
        model_format = 'ONNX'

    # normal keras h5 model
    elif model_path.endswith('.h5'):
        custom_object_dict = get_custom_objects()

        model = load_model(model_path, compile=False, custom_objects=custom_object_dict)
        model_format = 'H5'
        K.set_learning_phase(0)
    else:
        raise ValueError('invalid model file')

    return model, model_format
Code Example #6
def evaluate_tflite_model(tflite_model, input_ndarrays):
    """Evaluates the provided tf.lite model with the given input ndarrays.

  Args:
    tflite_model: bytes. The serialized tf.lite model.
    input_ndarrays: A list of NumPy arrays to feed as input to the model.

  Returns:
    A list of ndarrays produced by the model.

  Raises:
    ValueError: If the number of input arrays does not match the number of
      inputs the model expects.
  """
    the_interpreter = interpreter.Interpreter(model_content=tflite_model)
    the_interpreter.allocate_tensors()

    input_details = the_interpreter.get_input_details()
    output_details = the_interpreter.get_output_details()

    if len(input_details) != len(input_ndarrays):
        raise ValueError('Wrong number of inputs: provided=%s, '
                         'input_details=%s output_details=%s' %
                         (input_ndarrays, input_details, output_details))
    for input_tensor, data in zip(input_details, input_ndarrays):
        the_interpreter.set_tensor(input_tensor['index'], data)
    the_interpreter.invoke()
    return [
        the_interpreter.get_tensor(details['index'])
        for details in output_details
    ]
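A short usage sketch for the helper above, assuming a toy Keras model converted in-process (shapes are illustrative):

# Build a tiny Keras model, convert it to TFLite bytes, and evaluate it.
import numpy as np
import tensorflow as tf

keras_model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
tflite_model = tf.lite.TFLiteConverter.from_keras_model(keras_model).convert()
outputs = evaluate_tflite_model(
    tflite_model, [np.zeros((1, 4), dtype=np.float32)])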
Code Example #7
 def testInvokeBeforeReady(self):
     interpreter = interpreter_wrapper.Interpreter(
         model_path=resource_loader.get_path_to_datafile(
             'testdata/permute_float.tflite'))
     with self.assertRaisesRegex(
             RuntimeError, 'Invoke called on model that is not ready'):
         interpreter.invoke()
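The error is avoided by allocating tensors before invoking; continuing from the interpreter constructed in the test:

# Allocation must precede invocation:
interpreter.allocate_tensors()
interpreter.invoke()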
Code Example #8
    def testDestructionOrder(self):
        """Make sure internal _interpreter object is destroyed before delegate."""
        self.skipTest('TODO(b/142136355): fix flakiness and re-enable')
        # Track the order in which destructions were done
        # TODO(b/137299813): Enable when we fix for mac
        if sys.platform == 'darwin': return
        destructions = []

        def register_destruction(x):
            destructions.append(x if isinstance(x, str) else x.decode('utf-8'))
            return 0

        # Make a wrapper for the callback so we can send this to ctypes
        delegate = interpreter_wrapper.load_delegate(self._delegate_file)
        # Make an interpreter with the delegate
        interpreter = interpreter_wrapper.Interpreter(
            model_path=resource_loader.get_path_to_datafile(
                'testdata/permute_float.tflite'),
            experimental_delegates=[delegate])

        class InterpreterDestroyCallback(object):
            def __del__(self):
                register_destruction('interpreter')

        interpreter._interpreter.stuff = InterpreterDestroyCallback()
        # Destroy both delegate and interpreter
        library = delegate._library
        prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
        library.set_destroy_callback(prototype(register_destruction))
        del delegate
        del interpreter
        library.set_destroy_callback(None)
        # check the interpreter was destroyed before the delegate
        self.assertEqual(destructions, ['interpreter', 'test_delegate'])
Code Example #9
def extractor_predict(saved_model, image_file):
    interpreter = interpreter_wrapper.Interpreter(model_path=saved_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # NxHxWxC, H:1, W:2
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]

    img = image.load_img(image_file, target_size=(height, width))
    img = image.img_to_array(img)

    # check the type of the input tensor
    if input_details[0]['dtype'] == np.float32:
        #img = preprocess_input(img)
        #img = img / 255.
        img = img / 127.5 - 1
    elif input_details[0]['dtype'] == np.uint8:
        img = img.astype(np.uint8)

    input_data = np.expand_dims(img, axis=0)

    # Predict!
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    return output_data[0]
Code Example #10
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  args = parser.parse_args()

  cv2.setUseOptimized(True)
  client = mqtt.Client()
  client.connect("127.0.0.1", 1883, 600)

  labels = load_labels(args.labels)
  interpreter = ip.Interpreter(args.model)
  interpreter.allocate_tensors()
  _, height, width, _ = interpreter.get_input_details()[0]['shape']

  cap = cv2.VideoCapture("rtsp://192.168.1.164:554/user=admin&password=&channel=1&stream=1.sdp?")
  while True:
    ret, image_np = cap.read()
    if not ret:
      break
    image = Image.fromarray(image_np.astype('uint8')).convert('RGB').resize(
        (width, height), Image.LANCZOS)

    results = classify_image(interpreter, image)
    label_id, prob = results[0]
    output = {}
    prob = "scores %.f%%" % (prob * 100)
    output[labels[label_id]] = prob
    obj_ret = json.dumps(output)
    print("output ", output)
    client.publish('object_detection', obj_ret, qos=0)

  cap.release()
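classify_image is not defined in this snippet; a minimal hypothetical version, assuming a single image input and a 1-D score-vector output, might look like:

# Hypothetical classify_image: returns the top_k (label_id, score) pairs.
import numpy as np

def classify_image(interpreter, image, top_k=1):
    details = interpreter.get_input_details()[0]
    data = np.expand_dims(np.asarray(image), axis=0)
    if details['dtype'] == np.float32:
        data = data.astype(np.float32) / 255.0  # assumed normalization
    interpreter.set_tensor(details['index'], data)
    interpreter.invoke()
    scores = np.squeeze(interpreter.get_tensor(
        interpreter.get_output_details()[0]['index']))
    top = np.argsort(-scores)[:top_k]
    return [(int(i), float(scores[i])) for i in top]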
Code Example #11
    def testFloat(self):
        interpreter = interpreter_wrapper.Interpreter(
            model_path=resource_loader.get_path_to_datafile(
                'testdata/permute_float.tflite'))
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('input', input_details[0]['name'])
        self.assertEqual(np.float32, input_details[0]['dtype'])
        self.assertTrue(([1, 4] == input_details[0]['shape']).all())
        self.assertEqual((0.0, 0), input_details[0]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('output', output_details[0]['name'])
        self.assertEqual(np.float32, output_details[0]['dtype'])
        self.assertTrue(([1, 4] == output_details[0]['shape']).all())
        self.assertEqual((0.0, 0), output_details[0]['quantization'])

        test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
        expected_output = np.array([[4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
        interpreter.set_tensor(input_details[0]['index'], test_input)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        self.assertTrue((expected_output == output_data).all())
Code Example #12
    def load(self, model_path, inputs=None, outputs=None):
        self.sess = interpreter_wrapper.Interpreter(model_path=model_path)
        #self.sess = interpreter_wrapper.Interpreter(model_path=model_path, experimental_delegates=[delegate])

        if self.do_batches:
            input_details = self.sess.get_input_details()
            self.sess.resize_tensor_input(input_details[0]['index'],
                                          (self.batch_size, 224, 224, 3))

        # We have to load the delegate after resizing the input tensor for batches
        if self.do_delegate:
            print('Loading delegate... ' + os.getenv("NCORE_DELEGATE"))
            delegate = interpreter_wrapper.load_delegate(
                os.getenv("NCORE_DELEGATE"))
            self.sess.add_delegates(experimental_delegates=[delegate])

        self.sess.allocate_tensors()
        # keep input/output name to index mapping
        self.input2index = {
            i["name"]: i["index"]
            for i in self.sess.get_input_details()
        }
        self.output2index = {
            i["name"]: i["index"]
            for i in self.sess.get_output_details()
        }
        # keep input/output names
        self.inputs = list(self.input2index.keys())
        self.outputs = list(self.output2index.keys())
        return self
Code Example #13
    def testUint8(self):
        model_path = resource_loader.get_path_to_datafile(
            'testdata/permute_uint8.tflite')
        with io.open(model_path, 'rb') as model_file:
            data = model_file.read()

        interpreter = interpreter_wrapper.Interpreter(model_content=data)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('input', input_details[0]['name'])
        self.assertEqual(np.uint8, input_details[0]['dtype'])
        self.assertTrue(([1, 4] == input_details[0]['shape']).all())
        self.assertEqual((1.0, 0), input_details[0]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('output', output_details[0]['name'])
        self.assertEqual(np.uint8, output_details[0]['dtype'])
        self.assertTrue(([1, 4] == output_details[0]['shape']).all())
        self.assertEqual((1.0, 0), output_details[0]['quantization'])

        test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
        expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
        interpreter.resize_tensor_input(input_details[0]['index'],
                                        test_input.shape)
        interpreter.allocate_tensors()
        interpreter.set_tensor(input_details[0]['index'], test_input)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        self.assertTrue((expected_output == output_data).all())
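The same resize_tensor_input/allocate_tensors pattern also handles batching. A brief sketch continuing from the interpreter above (the permute model takes a [1, 4] input):

# Resize the input to a batch of 8 rows, then reallocate. Tensor references
# obtained before reallocation become invalid.
input_index = interpreter.get_input_details()[0]['index']
interpreter.resize_tensor_input(input_index, (8, 4))
interpreter.allocate_tensors()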
Code Example #14
  def testDestructionOrder(self):
    """Make sure internal _interpreter object is destroyed before delegate."""
    # Track the order in which destructions were done
    destructions = []
    def register_destruction(x):
      destructions.append(x)
      return 0
    # Make a wrapper for the callback so we can send this to ctypes
    delegate = interpreter_wrapper.load_delegate(self._delegate_file)
    prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
    destroy_callback = prototype(register_destruction)
    delegate._library.set_destroy_callback(destroy_callback)
    # Make an interpreter with the delegate
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'), experimental_delegates=[delegate])

    class InterpreterDestroyCallback(object):

      def __del__(self):
        register_destruction('interpreter')

    interpreter._interpreter.stuff = InterpreterDestroyCallback()
    # Destroy both delegate and interpreter
    del delegate
    del interpreter
    # check the interpreter was destroyed before the delegate
    self.assertEqual(destructions, ['interpreter', 'test_delegate'])
Code Example #15
 def setUp(self):
     self.interpreter = interpreter_wrapper.Interpreter(
         model_path=resource_loader.get_path_to_datafile(
             'testdata/permute_float.tflite'))
     self.interpreter.allocate_tensors()
     self.input0 = self.interpreter.get_input_details()[0]['index']
     self.initial_data = np.array([[-1., -2., -3., -4.]], np.float32)
Code Example #16
File: interpreter_test.py Project: MFChunga/poo
 def testDenseTensorAccess(self):
   interpreter = interpreter_wrapper.Interpreter(
       model_path=resource_loader.get_path_to_datafile('testdata/pc_conv.bin'))
   interpreter.allocate_tensors()
   weight_details = interpreter.get_tensor_details()[1]
   s_params = weight_details['sparsity_parameters']
   self.assertEqual(s_params, {})
Code Example #17
    def testString(self):
        interpreter = interpreter_wrapper.Interpreter(
            model_path=resource_loader.get_path_to_datafile(
                'testdata/gather_string.tflite'))
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(2, len(input_details))
        self.assertEqual('input', input_details[0]['name'])
        self.assertEqual(np.string_, input_details[0]['dtype'])
        self.assertTrue(([10] == input_details[0]['shape']).all())
        self.assertEqual((0.0, 0), input_details[0]['quantization'])
        self.assertEqual('indices', input_details[1]['name'])
        self.assertEqual(np.int64, input_details[1]['dtype'])
        self.assertTrue(([3] == input_details[1]['shape']).all())
        self.assertEqual((0.0, 0), input_details[1]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('output', output_details[0]['name'])
        self.assertEqual(np.string_, output_details[0]['dtype'])
        self.assertTrue(([3] == output_details[0]['shape']).all())
        self.assertEqual((0.0, 0), output_details[0]['quantization'])

        test_input = np.array([1, 2, 3], dtype=np.int64)
        interpreter.set_tensor(input_details[1]['index'], test_input)

        test_input = np.array(
            ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])
        expected_output = np.array([b'b', b'c', b'd'])
        interpreter.set_tensor(input_details[0]['index'], test_input)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        self.assertTrue((expected_output == output_data).all())
Code Example #18
 def testThreads_WrongType(self):
     with self.assertRaisesRegex(ValueError,
                                 'type of num_threads should be int'):
         interpreter_wrapper.Interpreter(
             model_path=resource_loader.get_path_to_datafile(
                 'testdata/permute_float.tflite'),
             num_threads=4.2)
Code Example #19
 def testNotSupportedOpResolverTypes(self):
     with self.assertRaisesRegex(
             ValueError, 'Unrecognized passed in op resolver type: test'):
         interpreter_wrapper.Interpreter(
             model_path=resource_loader.get_path_to_datafile(
                 'testdata/permute_float.tflite'),
             experimental_op_resolver_type='test')
Code Example #20
 def testInvalidIndex(self):
   interpreter = interpreter_wrapper.Interpreter(
       model_path=resource_loader.get_path_to_datafile(
           'testdata/permute_float.tflite'))
   interpreter.allocate_tensors()
   # Invalid tensor index passed.
    with self.assertRaisesRegex(ValueError, 'Tensor with no shape found.'):
     interpreter._get_tensor_details(4)
Code Example #21
 def _TestInterpreter(self, model_path, options=None):
     """Test wrapper function that creates an interpreter with the delegate."""
     # TODO(b/137299813): Enable when we fix for mac
     if sys.platform == 'darwin': return
     delegate = interpreter_wrapper.load_delegate(self._delegate_file,
                                                  options)
     return interpreter_wrapper.Interpreter(
         model_path=model_path, experimental_delegates=[delegate])
Code Example #22
def validate_yolo_model_tflite(model_path, image_file, anchors, class_names, loop_count):
    interpreter = interpreter_wrapper.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    #print(input_details)
    #print(output_details)

    # check the type of the input tensor
    if input_details[0]['dtype'] == np.float32:
        floating_model = True

    img = Image.open(image_file)
    image = np.array(img, dtype='uint8')

    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]

    image_data = preprocess_image(img, (height, width))
    image_shape = img.size

    # predict once first to bypass the model building time
    interpreter.set_tensor(input_details[0]['index'], image_data)
    interpreter.invoke()

    start = time.time()
    for i in range(loop_count):
        interpreter.set_tensor(input_details[0]['index'], image_data)
        interpreter.invoke()
    end = time.time()
    print("Average Inference time: {:.8f}ms".format((end - start) * 1000 /loop_count))

    out_list = []
    for output_detail in output_details:
        output_data = interpreter.get_tensor(output_detail['index'])
        out_list.append(output_data)

    start = time.time()
    predictions = yolo_head(out_list, anchors, num_classes=len(class_names), input_dims=(height, width))

    boxes, classes, scores = handle_predictions(predictions, confidence=0.1, iou_threshold=0.4)
    boxes = adjust_boxes(boxes, image_shape, (height, width))
    end = time.time()
    print("PostProcess time: {:.8f}ms".format((end - start) * 1000))

    print('Found {} boxes for {}'.format(len(boxes), image_file))

    for box, cls, score in zip(boxes, classes, scores):
        print("Class: {}, Score: {}".format(class_names[cls], score))

    colors = get_colors(class_names)
    image = draw_boxes(image, boxes, classes, scores, class_names, colors)

    Image.fromarray(image).show()
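preprocess_image is imported from elsewhere in this project; a minimal hypothetical stand-in for a float32 model could be:

# Hypothetical preprocess_image: resize to the model's (height, width) and
# normalize to [0, 1]. PIL's resize takes (width, height), hence the swap.
import numpy as np
from PIL import Image

def preprocess_image(img, target_size):
    resized = img.resize((target_size[1], target_size[0]), Image.BILINEAR)
    data = np.asarray(resized, dtype=np.float32) / 255.0
    return np.expand_dims(data, axis=0)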
Code Example #23
    def __init__(self, model_path):
        self.interpreter = interpreter_wrapper.Interpreter(
            model_path=model_path)
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        print(self.input_details)
        print(self.output_details)
Code Example #24
File: interpreter_test.py Project: MFChunga/poo
  def testPerChannelParams(self):
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile('testdata/pc_conv.bin'))
    interpreter.allocate_tensors()

    # Tensor index 1 is the weight.
    weight_details = interpreter.get_tensor_details()[1]
    qparams = weight_details['quantization_parameters']
    # Ensure that we retrieve per channel quantization params correctly.
    self.assertEqual(len(qparams['scales']), 128)
Code Example #25
    def test_latency(self):
        latency_op = 0.0
        for test_case in TEST_CASES:
            input_tensor = tf.ragged.constant(test_case)

            rank = input_tensor.shape.rank
            model = self._make_model(rank, 3, ragged_tensor=True, flex=False)
            interpreter = interpreter_wrapper.InterpreterWithCustomOps(
                model_content=model,
                custom_op_registerers=['AddNgramsCustomOp'])
            interpreter.resize_tensor_input(0, input_tensor.flat_values.shape)
            for r in range(rank - 1):
                interpreter.resize_tensor_input(
                    r + 1, input_tensor.nested_row_splits[r].shape)
            interpreter.allocate_tensors()
            interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
                                   input_tensor.flat_values.numpy())
            for r in range(rank - 1):
                interpreter.set_tensor(
                    interpreter.get_input_details()[r + 1]['index'],
                    input_tensor.nested_row_splits[r].numpy())
            start_time = timeit.default_timer()
            for _ in range(INVOKES_FOR_SINGLE_OP_BENCHMARK):
                interpreter.invoke()
            latency_op = latency_op + timeit.default_timer() - start_time
        latency_op = latency_op / (INVOKES_FOR_SINGLE_OP_BENCHMARK *
                                   len(TEST_CASES))

        latency_flex = 0.0
        for test_case in TEST_CASES:
            input_tensor = tf.ragged.constant(test_case)

            rank = input_tensor.shape.rank
            model = self._make_model(rank, 3, ragged_tensor=True, flex=True)
            interpreter = interpreter_wrapper.Interpreter(model_content=model)
            interpreter.resize_tensor_input(0, input_tensor.flat_values.shape)
            for r in range(rank - 1):
                interpreter.resize_tensor_input(
                    r + 1, input_tensor.nested_row_splits[r].shape)
            interpreter.allocate_tensors()
            interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
                                   input_tensor.flat_values.numpy())
            for r in range(rank - 1):
                interpreter.set_tensor(
                    interpreter.get_input_details()[r + 1]['index'],
                    input_tensor.nested_row_splits[r].numpy())
            start_time = timeit.default_timer()
            for _ in range(INVOKES_FOR_FLEX_DELEGATE_BENCHMARK):
                interpreter.invoke()
            latency_flex = latency_flex + timeit.default_timer() - start_time
        latency_flex = latency_flex / (INVOKES_FOR_FLEX_DELEGATE_BENCHMARK *
                                       len(TEST_CASES))

        logging.info('Latency (single op): %fms', latency_op * 1000.0)
        logging.info('Latency (flex delegate): %fms', latency_flex * 1000.0)
Code Example #26
File: test_tflite.py Project: MegEngine/mgeconvert
def _test_convert_result(
    inputs,
    tm,
    tm_result,
    max_err=max_error,
    nhwc=True,
    nhwc2=True,
    scale=1,
    zero_point=0,
    require_quantize=False,
):
    if not isinstance(inputs, Sequence):
        inputs = [
            inputs,
        ]
    if not isinstance(scale, Sequence):
        scale = (scale, )
    if not isinstance(zero_point, Sequence):
        zero_point = (zero_point, )
    for i, inp in enumerate(inputs):
        if nhwc and inp.ndim == 4:
            inputs[i] = inp.transpose((0, 2, 3, 1))

    tracedmodule_to_tflite(tm,
                           output=tmp_file + ".tflite",
                           require_quantize=require_quantize)

    tfl_model = interpreter.Interpreter(model_path=tmp_file + ".tflite")
    tfl_model.allocate_tensors()

    input_details = tfl_model.get_input_details()
    for i, inp in enumerate(inputs):
        tfl_model.set_tensor(input_details[i]["index"], inp)
    tfl_model.invoke()

    pred_tfl = []
    if not isinstance(scale, Sequence):
        scale = (scale, )
        zero_point = (zero_point, )
    for index, i in enumerate(tfl_model.get_output_details()):
        out = tfl_model.tensor(i["index"])()
        if nhwc2 and out.ndim == 4:
            out = out.transpose((0, 3, 1, 2))
        index = len(scale) - 1 if index >= len(scale) else index
        out = ((out - float(zero_point[index])) *
               scale[index]).astype("float32")
        pred_tfl.append(out)

    if not isinstance(tm_result, Sequence):
        tm_result = (tm_result, )
    for i, j, s in zip(tm_result, pred_tfl, scale):
        assert i.shape == j.shape
        assert i.dtype == j.dtype
        atol = max_err if s == 1 else s
        np.testing.assert_allclose(i, j, atol=atol)
Code Example #27
def generate_results(tflite_model,
                     val_dir,
                     scale_value,
                     normal_val,
                     results_dir=''):
    '''
    :param tflite_model: (required) path to the tflite model
    :param val_dir: (required) path to the validation image set
    :param scale_value: (required) image downsampling ratio
    :param normal_val: normalization factor applied to the model output
    :param results_dir: (optional) output directory; defaults to a 'result'
        folder next to the model
    '''
    interpreter = interpreter_wrapper.Interpreter(model_path=tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    if not results_dir:
        results_dir = os.path.join(os.path.dirname(tflite_model), 'result')
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    img_list = os.listdir(val_dir)
    assert len(img_list) == 500, f'validation set has {len(img_list)} images, expected 500'

    for img in tqdm(img_list):
        #读取图片
        img_path = os.path.join(val_dir, img)

        image = tf.io.read_file(img_path)
        image = tf.image.decode_png(image, channels=3, dtype=tf.dtypes.uint8)
        image = np.asarray(image).astype(np.float32)
        # preprocess the image
        image_shape = np.shape(image)
        image /= 255.0

        image_new = cv2.resize(image, (int(
            image_shape[1] / scale_value), int(image_shape[0] / scale_value)),
                               interpolation=cv2.INTER_LINEAR)
        image_new = np.reshape(image_new,
                               (1, int(image_shape[0] / scale_value),
                                int(image_shape[1] / scale_value), 3))

        # run inference
        interpreter.set_tensor(input_details[0]['index'], image_new)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        results = np.squeeze(output_data)
        results *= normal_val
        results = results.astype(np.uint16)

        result_path = os.path.join(results_dir, img)
        cv2.imwrite(result_path, results)
    print(f'Results are saved in {results_dir}')
Code Example #28
    def testStringZeroDim(self):
        data = b'abcd' + bytes(16)
        interpreter = interpreter_wrapper.Interpreter(
            model_path=resource_loader.get_path_to_datafile(
                'testdata/gather_string_0d.tflite'))
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        interpreter.set_tensor(input_details[0]['index'], np.array(data))
        test_input_tensor = interpreter.get_tensor(input_details[0]['index'])
        self.assertEqual(len(data), len(test_input_tensor.item(0)))
Code Example #29
def validate_deeplab_model_tflite(model_path, image_file, class_names, do_crf,
                                  label_file, loop_count):
    interpreter = interpreter_wrapper.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    #print(input_details)
    #print(output_details)

    # check the type of the input tensor
    if input_details[0]['dtype'] == np.float32:
        floating_model = True

    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    model_image_size = (height, width)

    num_classes = output_details[0]['shape'][-1]
    if class_names:
        # check if classes number match with model prediction
        assert num_classes == len(
            class_names), 'classes number mismatch with model.'

    # prepare input image
    img = Image.open(image_file)
    image_data = preprocess_image(img, model_image_size)
    image = image_data[0].astype('uint8')
    #origin image shape, in (width, height) format
    origin_image_size = img.size

    # predict once first to bypass the model building time
    interpreter.set_tensor(input_details[0]['index'], image_data)
    interpreter.invoke()

    start = time.time()
    for i in range(loop_count):
        interpreter.set_tensor(input_details[0]['index'], image_data)
        interpreter.invoke()
    end = time.time()
    print("Average Inference time: {:.8f}ms".format(
        (end - start) * 1000 / loop_count))

    prediction = []
    for output_detail in output_details:
        output_data = interpreter.get_tensor(output_detail['index'])
        prediction.append(output_data)

    handle_prediction(prediction, image, np.array(img), num_classes,
                      class_names, model_image_size, origin_image_size, do_crf,
                      label_file)
    return
Code Example #30
    def testMultipleInterpreters(self):
        # TODO(b/137299813): Enable when we fix for mac
        if sys.platform == 'darwin': return

        delegate = interpreter_wrapper.load_delegate(self._delegate_file)
        lib = delegate._library

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 0)

        interpreter_a = interpreter_wrapper.Interpreter(
            model_path=self._model_file, experimental_delegates=[delegate])

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 1)

        interpreter_b = interpreter_wrapper.Interpreter(
            model_path=self._model_file, experimental_delegates=[delegate])

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 2)

        del delegate
        del interpreter_a

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 0)
        self.assertEqual(lib.get_num_delegates_invoked(), 2)

        del interpreter_b

        self.assertEqual(lib.get_num_delegates_created(), 1)
        self.assertEqual(lib.get_num_delegates_destroyed(), 1)
        self.assertEqual(lib.get_num_delegates_invoked(), 2)