Example No. 1
  def testGraphDefBasic(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input")
    _ = in_tensor + in_tensor
    sess = session.Session()

    tflite_model = convert.toco_convert_graph_def(
        sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
        inference_type=lite_constants.FLOAT)
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual("input", input_details[0]["name"])
    self.assertEqual(np.float32, input_details[0]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
    self.assertEqual((0., 0.), input_details[0]["quantization"])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual("add", output_details[0]["name"])
    self.assertEqual(np.float32, output_details[0]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
    self.assertEqual((0., 0.), output_details[0]["quantization"])
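
A small follow-on sketch (not part of the original test): with the interpreter, input_details, and output_details built above, the converted "add" graph can be run end to end. The input value is arbitrary.

    input_data = np.ones([1, 16, 16, 3], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    result = interpreter.get_tensor(output_details[0]['index'])  # equals input_data + input_data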
Example No. 2
  def testSimpleModel(self):
    """Test a SavedModel."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Example No. 3
    def testConcreteFunc(self):
        input_data = constant_op.constant(1., shape=[1])
        root = tracking.AutoTrackable()
        root.v1 = variables.Variable(3.)
        root.v2 = variables.Variable(2.)
        root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
        concrete_func = root.f.get_concrete_function(input_data)

        # Convert model.
        converter = lite.TFLiteConverterV2.from_concrete_functions(
            [concrete_func])
        converter.experimental_enable_mlir_converter = True
        converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])

        tflite_model = mlir_convert_and_check_for_unsupported(self, converter)
        if tflite_model is None:
            return

        # Ensures the model contains TensorFlow ops.
        # TODO(nupurgarg): Check values once there is a Python delegate interface.
        interpreter = Interpreter(model_content=tflite_model)
        with self.assertRaises(RuntimeError) as error:
            interpreter.allocate_tensors()
        self.assertIn(
            'Regular TensorFlow ops are not supported by this interpreter. Make '
            'sure you invoke the Flex delegate before inference.',
            str(error.exception))
Example No. 4
  def testOrderInputArrays(self):
    """Test a SavedModel ordering of input arrays."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir, input_arrays=['inputB', 'inputA'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Example No. 5
  def testSequentialModelInputShape(self):
    """Test a Sequential tf.keras model testing input shapes argument."""
    keras_file = self._getSequentialModel()

    # Passing in shape of invalid input array raises error.
    with self.assertRaises(ValueError) as error:
      converter = lite.TFLiteConverter.from_keras_model_file(
          keras_file, input_shapes={'invalid-input': [2, 3]})
    self.assertEqual(
        "Invalid tensor 'invalid-input' found in tensor shapes map.",
        str(error.exception))

    # Passing in shape of valid input array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'dense_input': [2, 3]})
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

    # Check input shape from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertTrue(([2, 3] == input_details[0]['shape']).all())
Example No. 6
  def testFloatWithShapesArray(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, ['Placeholder'], ['add'],
        input_shapes={'Placeholder': [1, 16, 16, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
Example No. 7
  def testDumpGraphviz(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure interpreter is able to allocate and check graphviz data.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    num_items_graphviz = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz)

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    converter.dump_graphviz_video = True
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure graphviz folder has more data after using video flag.
    num_items_graphviz_video = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz_video > num_items_graphviz)
Example No. 9
  def testFloat(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Example No. 11
  def testPostTrainingCalibrateAndQuantize(self):
    func, calibration_gen = self._getCalibrationQuantizeModel()

    # Convert float model.
    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
    float_tflite = float_converter.convert()
    self.assertTrue(float_tflite)

    # Convert quantized model.
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calibration_gen
    quantized_tflite = quantized_converter.convert()
    self.assertTrue(quantized_tflite)

    # The default input and output types should be float.
    interpreter = Interpreter(model_content=quantized_tflite)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual(np.float32, input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual(np.float32, output_details[0]['dtype'])

    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite), len(float_tflite))
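
For context, a minimal sketch of what a representative dataset generator like calibration_gen above might look like. During post-training calibration the converter iterates over it, and each yielded value is a list with one array per model input; the input shape here is an assumption for illustration.

import numpy as np

def calibration_gen():
  # Yield a handful of representative inputs; the value ranges observed here
  # drive the quantization parameters chosen during calibration.
  for _ in range(10):
    yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]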
Example No. 13
  def testDeprecatedFlags(self):
    with ops.Graph().as_default():
      in_tensor = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32)
      out_tensor = in_tensor + in_tensor
      sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])

    # Ensure `target_ops` is set to the correct value after flag deprecation.
    self.assertEqual(converter.target_ops, set([lite.OpsSet.SELECT_TF_OPS]))
    self.assertEqual(converter.target_spec.supported_ops,
                     set([lite.OpsSet.SELECT_TF_OPS]))

    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensures the model contains TensorFlow ops.
    # TODO(nupurgarg): Check values once there is a Python delegate interface.
    interpreter = Interpreter(model_content=tflite_model)
    with self.assertRaises(RuntimeError) as error:
      interpreter.allocate_tensors()
    self.assertIn(
        'Regular TensorFlow ops are not supported by this interpreter.',
        str(error.exception))
Example No. 14
  def testFunctionalSequentialModel(self):
    """Test a Functional tf.keras model containing a Sequential model."""
    with session.Session().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model = keras.models.Model(model.input, model.output)

      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      model.predict(x)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)
Example No. 15
  def testCalibrateAndQuantizeBuiltinInt8(self):
    func, calibration_gen = self._getCalibrationQuantizeModel()

    # Convert float model.
    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
    float_tflite = float_converter.convert()
    self.assertTrue(float_tflite)

    # Convert model by specifying target spec (instead of optimizations), since
    # when targeting an integer only backend, quantization is mandatory.
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen
    quantized_tflite = quantized_converter.convert()
    self.assertTrue(quantized_tflite)

    # The default input and output types should be float.
    interpreter = Interpreter(model_content=quantized_tflite)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual(np.float32, input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual(np.float32, output_details[0]['dtype'])

    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite), len(float_tflite))
Example No. 16
  def testPbtxt(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
    write_graph(sess.graph_def, '', graph_def_file, True)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
                                                       ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Example No. 17
  def testDefaultRangesStats(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
    converter.default_ranges_stats = (0, 6)  # min, max
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale
Example No. 18
  def testSequentialModelInputShape(self):
    """Test a Sequential tf.keras model testing input shapes argument."""
    keras_file = self._getSequentialModel()

    # Passing in shape of invalid input array has no impact as long as all input
    # arrays have a shape.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'invalid-input': [2, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Passing in shape of valid input array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'dense_input': [2, 3]})
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

    # Check input shape from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertTrue(([2, 3] == input_details[0]['shape']).all())
Example No. 19
  def testNoneBatchSize(self):
    """Test a SavedModel, with None in input tensor's shape."""
    saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])

    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Example No. 20
    def _feed_tensors(self, dataset_gen, resize_input):
        """Feed tensors to the calibrator."""
        initialized = {}

        for sample in dataset_gen():
            if isinstance(sample, tuple):
                if not isinstance(sample[1], dict):
                    raise ValueError(
                        "You need to provide a dictionary with input "
                        "names and values as the second argument in the "
                        "tuple.")
                # Convert signature based inputs to the tensor index based data.
                if self._interpreter is None:
                    self._interpreter = Interpreter(
                        model_content=self._model_content)
                signature_key = sample[0]
                input_array = self._create_input_array_from_dict(
                    signature_key, sample[1])
            elif isinstance(sample, dict):
                # Convert signature based inputs to the tensor index based data.
                if self._interpreter is None:
                    self._interpreter = Interpreter(
                        model_content=self._model_content)
                signature_key = None
                input_array = self._create_input_array_from_dict(None, sample)
            elif isinstance(sample, list):
                signature_key = None
                input_array = sample
            else:
                raise ValueError(
                    "You need to provide either a dictionary with input "
                    "names and values, a tuple with signature key and a "
                    "dictionary with input names and values, or an array "
                    "with input values in the order of input tensors of "
                    "the graph in the representative_dataset function. "
                    "Unsupported value from dataset: {}.".format(sample))

            if signature_key not in initialized:
                initialized[signature_key] = True
                if resize_input:
                    if signature_key is not None:
                        self._calibrator.Prepare(
                            [list(s.shape) for s in input_array],
                            signature_key)
                    else:
                        self._calibrator.Prepare(
                            [list(s.shape) for s in input_array])
                else:
                    if signature_key is not None:
                        self._calibrator.Prepare(signature_key)
                    else:
                        self._calibrator.Prepare()
            if signature_key is not None:
                self._calibrator.FeedTensor(input_array, signature_key)
            else:
                self._calibrator.FeedTensor(input_array)
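
A hedged sketch of a dataset generator exercising the three sample formats the loop above accepts: a plain list (positional input tensors), a dict keyed by input name, and a (signature_key, dict) tuple. The input name and signature key below are assumptions.

import numpy as np

def representative_dataset():
  sample = np.zeros((1, 4), dtype=np.float32)
  yield [sample]                                    # list: order of input tensors
  yield {'input_a': sample}                         # dict: input name to value
  yield ('serving_default', {'input_a': sample})    # tuple: signature key + dict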
Example No. 21
    def __init__(self):
        self.__alive = True
        with open(PATH_TO_LABELS, 'r') as f:
            self.__labels = [line.strip() for line in f.readlines()]

        if self.__labels[0] == '???':
            del self.__labels[0]

        self.__interpreter = Interpreter(model_path=PATH_TO_CKPT)
        self.__interpreter.allocate_tensors()
Example No. 22
class LiteModel:
    def __init__(self, model_content):
        model_content = bytes(model_content)
        self.interpreter = Interpreter(model_content=model_content)

        input_details = self.interpreter.get_input_details()
        output_details = self.interpreter.get_output_details()

        self.input_shape = input_details[0]['shape'][1:]
        self.input_index = input_details[0]['index']
        self.output_index = output_details[0]['index']

        self.input_scale, self.input_zero_point = input_details[0][
            'quantization']
        self.output_scale, self.output_zero_point = output_details[0][
            'quantization']

        self.interpreter.allocate_tensors()

    def predict(self, X):
        X = self.input_map(X)
        self.interpreter.set_tensor(self.input_index, X)
        self.interpreter.invoke()
        Y = self.interpreter.get_tensor(self.output_index)
        Y = self.output_map(Y)
        return Y

    def input_map(self, x):
        return np.array(x, dtype=np.float32)
        # return np.array(x / self.input_scale + self.input_zero_point, dtype=np.uint8)

    def output_map(self, y):
        return np.array(y, dtype=np.float32)
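
The commented-out line in input_map is the standard TFLite affine quantization mapping. A self-contained sketch of that mapping and its inverse (the scale and zero point are made-up values):

import numpy as np

scale, zero_point = 0.0039, 128
x = np.array([0.25, -0.25], dtype=np.float32)
q = np.round(x / scale + zero_point).astype(np.uint8)   # quantize
x_back = (q.astype(np.float32) - zero_point) * scale    # dequantize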
Example No. 23
class TfLiteModel:
    def __init__(self, model_content):
        self.model_content = bytes(model_content)
        self.interpreter = Interpreter(model_content=self.model_content)
        input_details = self.interpreter.get_input_details()
        output_details = self.interpreter.get_output_details()
        print(input_details)
        self.input_index = input_details[0]['index']
        self.output_index = output_details[0]['index']

        self.input_scale, self.input_zero_point = input_details[0][
            'quantization']
        self.output_scale, self.output_zero_point = output_details[0][
            'quantization']

        self.interpreter.allocate_tensors()

    def forward(self, data_in):
        test_input = np.array(data_in / self.input_scale +
                              self.input_zero_point,
                              dtype=np.uint8).reshape(1, -1)
        self.interpreter.set_tensor(self.input_index, test_input)
        self.interpreter.invoke()

        output_data = self.interpreter.get_tensor(self.output_index)[0]
        return (np.array(output_data, dtype=np.float32) -
                self.output_zero_point) * self.output_scale
Example No. 24
  def testSequentialModelTocoConverter(self):
    """Test a Sequential tf.keras model with deprecated TocoConverter."""
    keras_file = self._getSequentialModel()

    converter = lite.TocoConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
Example No. 26
  def __init__(self, model_file, label_file):
    logger.info(model_file)
    self._interpreter = Interpreter(model_path=model_file)
    self._interpreter.set_num_threads(4)
    self._interpreter.allocate_tensors()
    self._labels = self.load_labels(label_file)
    self._input_details = self._interpreter.get_input_details()
    self._output_details = self._interpreter.get_output_details()
    self._input_height = self._input_details[0]['shape'][1]
    self._input_width = self._input_details[0]['shape'][2]
    self._floating_model = (self._input_details[0]['dtype'] == np.float32)
Example No. 28
  def _evaluateTFLiteModel(self, tflite_model, input_data):
    """Evaluates the model on the `input_data`."""
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    for input_tensor, tensor_data in zip(input_details, input_data):
      interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
    interpreter.invoke()
    return interpreter.get_tensor(output_details[0]['index'])
Example No. 29
    def load_model(self, model_path):
        self.interpreter = Interpreter(model_path, num_threads=4)
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        input_shape = self.input_details[0]['shape']  # [1, height, width, channels]
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.channels = input_shape[3]

        self.output_details = self.interpreter.get_output_details()
        self.output_shape = self.output_details[0]['shape']
Example No. 30
  def testSimpleModelTocoConverter(self):
    """Test a SavedModel with deprecated TocoConverter."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
Example No. 31
    def __init__(self):

        self.car_boxes = []

        os.chdir(cwd)

        # TensorFlow localization/detection model
        # Single-shot detection with MobileNet architecture trained on COCO
        # dataset
        # detect_model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
        # PATH_TO_CKPT = detect_model_name + '/frozen_inference_graph.pb'

        # detect_model_name = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu'
        # PATH_TO_CKPT = detect_model_name + '/tflite_graph.pb'

        # # setup tensorflow graph
        # self.detection_graph = tf.Graph()

        # # configuration for possible GPU use
        # config = tf.ConfigProto()
        # config.gpu_options.allow_growth = True
        # # load frozen tensorflow detection model and initialize
        # # the tensorflow graph
        # with self.detection_graph.as_default():
        #     od_graph_def = tf.GraphDef()
        #     with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        #        serialized_graph = fid.read()
        #        od_graph_def.ParseFromString(serialized_graph)
        #        tf.import_graph_def(od_graph_def, name='')

        #     self.sess = tf.Session(graph=self.detection_graph, config=config)
        #     self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        #       # Each box represents a part of the image where a particular object was detected.
        #     self.boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        #       # Each score represent how level of confidence for each of the objects.
        #       # Score is shown on the result image, together with the class label.
        #     self.scores =self.detection_graph.get_tensor_by_name('detection_scores:0')
        #     self.classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        #     self.num_detections =self.detection_graph.get_tensor_by_name('num_detections:0')

        ## tflite
        detect_model_name = 'ssd_mobilenet_v2_coco_quantized_tflite'
        PATH_TO_CKPT = detect_model_name + '/detect.tflite'

        # Define lite graph and Load Tensorflow Lite model into memory
        self.interpreter = Interpreter(model_path=PATH_TO_CKPT)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
Example No. 32
  def testFloatTocoConverter(self):
    """Tests deprecated test TocoConverter."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the interpreter is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
Example No. 35
class ClassifyBird():
    def __init__(self):
        model_path = f"{os.path.dirname(__file__)}/aiy/lite-model_aiy_vision_classifier_birds_V1_3.tflite"
        label_path = f"{os.path.dirname(__file__)}/aiy/probability-labels-en.txt"

        print(f"[model_path] {model_path}")
        print(f"[label_path] {label_path}")

        self.interpreter = Interpreter(model_path=model_path)
        self.labels = self.load_labels(label_path)

        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]

        self.floating_model = (self.input_details[0]['dtype'] == np.float32)

        self.input_mean = 127.5
        self.input_std = 127.5

    def load_labels(self, filename):
        with open(filename, 'r') as f:
            return [line.strip() for line in f.readlines()]

    def classify_path(self, path):
        image = cv2.imread(path)
        return self.__classify_array(image)

    def classify_image(self, image):
        return self.__classify_array(image)

    def __classify_array(self, image) -> ClassifyResultSet:
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        imH, imW, _ = image.shape
        image_resized = cv2.resize(image_rgb, (self.width, self.height))
        input_data = np.expand_dims(image_resized, axis=0)

        if self.floating_model:
            input_data = (np.float32(input_data) - self.input_mean) / self.input_std

        self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
        self.interpreter.invoke()

        output_data = self.interpreter.get_tensor(
            self.output_details[0]['index'])
        results = np.squeeze(output_data)

        return ClassifyResultSet(results, self.labels)
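
For reference, the normalization in the floating-model branch above maps uint8 pixels in [0, 255] to roughly [-1, 1] when input_mean = input_std = 127.5:

import numpy as np

pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)
normalized = (pixels - 127.5) / 127.5  # -> [-1.0, 0.0, 1.0]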
Example No. 36
    def __init__(self, PATH_TO_CKPT):
        # Load the Tensorflow Lite model.

        self.interpreter = Interpreter(model_path=PATH_TO_CKPT)

        self.interpreter.allocate_tensors()

        # Get model details
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]

        self.floating_model = (self.input_details[0]['dtype'] == np.float32)

        self.input_mean = 127.5
        self.input_std = 127.5
Example No. 37
  def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, method_name,
                                            inputs):
    """Evaluates the model on the `inputs`.

    Args:
      tflite_model: TensorFlow Lite model.
      method_name: Exported Method name of the SavedModel.
      inputs: Map from input tensor names in the SignatureDef to tensor value.

    Returns:
      Dictionary of outputs.
      Key is the output name in the SignatureDef 'method_name'
      Value is the output value
    """
    interpreter = Interpreter(model_content=tflite_model)
    signature_runner = interpreter.get_signature_runner(method_name)
    return signature_runner(**inputs)
Example No. 38
  def testTrainingTimeQuantizeConversion(self):
    model = self._getTrainingTimeQuantizedModel()

    float_converter = lite.TFLiteConverterV2.from_keras_model(model)
    float_tflite = float_converter.convert()
    self.assertTrue(float_tflite)

    quantized_converter = lite.TFLiteConverterV2.from_keras_model(model)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_tflite = quantized_converter.convert()
    self.assertTrue(quantized_tflite)

    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite), len(float_tflite))

    interpreter = Interpreter(model_content=quantized_tflite)
    self.assertEqual(np.float32, interpreter.get_input_details()[0]['dtype'])
Example No. 39
  def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, signature_key,
                                            inputs):
    """Evaluates the model on the `inputs`.

    Args:
      tflite_model: TensorFlow Lite model.
      signature_key: Signature key.
      inputs: Map from input tensor names in the SignatureDef to tensor value.

    Returns:
      Dictionary of outputs.
      Key is the output name in the SignatureDef 'signature_key'
      Value is the output value
    """
    interpreter = Interpreter(model_content=tflite_model)
    signature_runner = interpreter.get_signature_runner(signature_key)
    return signature_runner(**inputs)
Example No. 40
    def _set_input_tensors(self, interpreter: _interpreter.Interpreter,
                           tensor_data: Sequence[np.ndarray],
                           initialize: bool) -> None:
        """Sets input tensors into TFLite model Interpreter.

    Args:
      interpreter: a tf.lite.Interpreter object with allocated tensors.
      tensor_data: a list of Numpy array data.
      initialize: set to true when input is first set for the interpreter, to
        set input shapes and allocate tensors.

    Raises:
      ValueError: when inputs can't be set, or size of provided inputs does not
      match size of model inputs.
    """
        input_details = interpreter.get_input_details()
        if len(input_details) != len(tensor_data):
            raise ValueError(
                'Number of inputs provided ({}) does not match number of inputs to '
                'the model ({})'.format(len(tensor_data), len(input_details)))

        if initialize:
            for input_detail, tensor in zip(input_details, tensor_data):
                interpreter.resize_tensor_input(input_detail['index'],
                                                tensor.shape)
            interpreter.allocate_tensors()

        for input_detail, tensor in zip(input_details, tensor_data):
            if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8:
                quant_params = _get_quant_params(input_detail)
                if quant_params:
                    scale, zero_point = quant_params
                    tensor = np.round((tensor / scale) + zero_point).astype(
                        np.int8)
            interpreter.set_tensor(input_detail['index'], tensor)
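
The helper _get_quant_params is not shown in this snippet. A plausible sketch, under the assumption that it returns the tensor's (scale, zero_point) pair and None when the tensor is not quantized:

def _get_quant_params(input_detail):
    # input_detail['quantization'] is (scale, zero_point); a scale of 0.0
    # conventionally marks an unquantized tensor.
    scale, zero_point = input_detail['quantization']
    if scale == 0.0:
        return None
    return scale, zero_point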
Example No. 41
  def testQuantization(self):
    in_tensor_1 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
    in_tensor_2 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
    out_tensor = array_ops.fake_quant_with_min_max_args(
        in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(
        sess, [in_tensor_1, in_tensor_2], [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {
        'inputA': (0., 1.),
        'inputB': (0., 1.)
    }  # mean, std_dev
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.),
                     input_details[0]['quantization'])  # scale, zero_point

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.uint8, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((1., 0.),
                     input_details[1]['quantization'])  # scale, zero_point

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('output', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale
Example No. 42
  def testGraphDefQuantization(self):
    in_tensor_1 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
    in_tensor_2 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
    _ = array_ops.fake_quant_with_min_max_args(
        in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
    sess = session.Session()

    input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
    output_arrays = ["output"]
    tflite_model = convert.toco_convert_graph_def(
        sess.graph_def,
        input_arrays_map,
        output_arrays,
        inference_type=lite_constants.QUANTIZED_UINT8,
        quantized_input_stats=[(0., 1.), (0., 1.)])
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual("inputA", input_details[0]["name"])
    self.assertEqual(np.uint8, input_details[0]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
    self.assertEqual((1., 0.),
                     input_details[0]["quantization"])  # scale, zero_point

    self.assertEqual("inputB", input_details[1]["name"])
    self.assertEqual(np.uint8, input_details[1]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all())
    self.assertEqual((1., 0.),
                     input_details[1]["quantization"])  # scale, zero_point

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual("output", output_details[0]["name"])
    self.assertEqual(np.uint8, output_details[0]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
    self.assertTrue(output_details[0]["quantization"][0] > 0)  # scale
Example No. 44
  def testFloatTocoConverter(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
                                                     ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
Example No. 45
  def testFlexResourceVariables(self):

    class Model(tf.Module):

      def __init__(self):
        self.v = tf.Variable([[0.0, 0.0, 0.0, 0.0]])

      @tf.function(
          input_signature=[tf.TensorSpec(shape=[1, 4], dtype=tf.float32)])
      def eval(self, x):
        # Control flow is needed to generate "FlexReadVariableOp".
        if tf.reduce_mean(x) > 1.0:
          self.v.assign_add([[1.0, 1.0, 1.0, 1.0]])
        return self.v + x

    m = Model()
    to_save = m.eval.get_concrete_function()
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    tf.saved_model.save(m, save_dir, to_save)
    converter = tf.lite.TFLiteConverter.from_saved_model(save_dir)

    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,
        tf.lite.OpsSet.SELECT_TF_OPS,
    ]
    converter.experimental_enable_resource_variables = True
    tflite_model = converter.convert()

    # Check the model works with TensorFlow ops.
    interpreter = Interpreter(model_content=tflite_model)
    signature_runner = interpreter.get_signature_runner()
    outputs = signature_runner(
        x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
    expected_output = np.array([[2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
    self.assertTrue((expected_output == list(outputs.values())[0]).all())

    # Second run.
    outputs = signature_runner(
        x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
    expected_output = np.array([[3.0, 4.0, 5.0, 6.0]], dtype=np.float32)
    self.assertTrue((expected_output == list(outputs.values())[0]).all())
Example No. 46
  def testMatMulQuantize(self):
    concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()
    float_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func])
    float_tflite_model = float_converter.convert()

    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func])
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_tflite_model = quantized_converter.convert()

    # The default input and output types should be float.
    quantized_interpreter = Interpreter(model_content=quantized_tflite_model)
    quantized_interpreter.allocate_tensors()
    input_details = quantized_interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])

    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
Example No. 47
    def testCalibrateAndQuantizeBuiltinInt8(self):
        func, calibration_gen = self._getCalibrationQuantizeModel()

        # Convert float model.
        float_converter = lite.TFLiteConverterV2.from_concrete_functions(
            [func])
        float_tflite = float_converter.convert()
        self.assertTrue(float_tflite)

        # Convert model by specifying target spec (instead of optimizations), since
        # when targeting an integer only backend, quantization is mandatory.
        quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
            [func])
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        quantized_converter.representative_dataset = calibration_gen
        quantized_tflite = quantized_converter.convert()
        self.assertTrue(quantized_tflite)

        # The default input and output types should be float.
        interpreter = Interpreter(model_content=quantized_tflite)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        self.assertLen(input_details, 1)
        self.assertEqual(np.float32, input_details[0]['dtype'])
        output_details = interpreter.get_output_details()
        self.assertLen(output_details, 1)
        self.assertEqual(np.float32, output_details[0]['dtype'])

        # Ensure that the quantized weights tflite model is smaller.
        self.assertLess(len(quantized_tflite), len(float_tflite))
Example No. 48
    def testGraphDefBasic(self):
        in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                          dtype=dtypes.float32,
                                          name="input")
        _ = in_tensor + in_tensor
        sess = session.Session()

        tflite_model = convert.toco_convert_graph_def(
            sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
            inference_type=lite_constants.FLOAT)
        self.assertTrue(tflite_model)

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual("input", input_details[0]["name"])
        self.assertEqual(np.float32, input_details[0]["dtype"])
        self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
        self.assertEqual((0., 0.), input_details[0]["quantization"])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual("add", output_details[0]["name"])
        self.assertEqual(np.float32, output_details[0]["dtype"])
        self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
        self.assertEqual((0., 0.), output_details[0]["quantization"])
Example No. 49
    def testPostTrainingCalibrateAndQuantize(self):
        func, calibration_gen = self._getCalibrationQuantizeModel()

        # Convert float model.
        float_converter = lite.TFLiteConverterV2.from_concrete_functions(
            [func])
        float_tflite = float_converter.convert()
        self.assertTrue(float_tflite)

        # Convert quantized model.
        quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
            [func])
        quantized_converter.optimizations = [lite.Optimize.DEFAULT]
        quantized_converter.representative_dataset = calibration_gen
        quantized_tflite = quantized_converter.convert()
        self.assertTrue(quantized_tflite)

        # The default input and output types should be float.
        interpreter = Interpreter(model_content=quantized_tflite)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        self.assertLen(input_details, 1)
        self.assertEqual(np.float32, input_details[0]['dtype'])
        output_details = interpreter.get_output_details()
        self.assertLen(output_details, 1)
        self.assertEqual(np.float32, output_details[0]['dtype'])

        # Ensure that the quantized TFLite model is smaller than the float
        # model.
        self.assertLess(len(quantized_tflite), len(float_tflite))
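
The float input/output assertion above reflects the converter defaults of this era. Later TensorFlow releases also expose inference_input_type/inference_output_type on the V2 converter, so a fully integer interface could be requested; a hedged sketch under that assumption:

  quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
  quantized_converter.optimizations = [lite.Optimize.DEFAULT]
  quantized_converter.representative_dataset = calibration_gen
  quantized_converter.target_spec.supported_ops = [
      lite.OpsSet.TFLITE_BUILTINS_INT8
  ]
  # Only available in newer converter versions; with these set, the model's
  # input and output tensors are int8 rather than float32.
  quantized_converter.inference_input_type = dtypes.int8
  quantized_converter.inference_output_type = dtypes.int8
  int8_io_tflite = quantized_converter.convert()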
Example No. 50
    def testString(self):
        in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
        out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
        sess = session.Session()

        # Convert model and ensure model is not None.
        converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                      [out_tensor])
        tflite_model = converter.convert()

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('Placeholder', input_details[0]['name'])
        self.assertEqual(np.string_, input_details[0]['dtype'])
        self.assertTrue(([4] == input_details[0]['shape']).all())

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('Reshape', output_details[0]['name'])
        self.assertEqual(np.string_, output_details[0]['dtype'])
        self.assertTrue(([2, 2] == output_details[0]['shape']).all())
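
Note that the test stops at tensor metadata; string tensors carry no quantization parameters, which is why the quantization assertion is skipped here. Assuming the Python interpreter build supports string tensors, the model could plausibly be exercised as well; this step is an assumption, not part of the original test:

  input_data = np.array([b'a', b'b', b'c', b'd'])
  interpreter.set_tensor(input_details[0]['index'], input_data)
  interpreter.invoke()
  result = interpreter.get_tensor(output_details[0]['index'])
  # Expect the same four byte strings back, reshaped to [2, 2].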
Example No. 51
    def testFloat(self):
        in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                          dtype=dtypes.float32)
        out_tensor = in_tensor + in_tensor
        sess = session.Session()

        # Convert model and ensure model is not None.
        converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                      [out_tensor])
        converter.experimental_enable_mlir_converter = True
        tflite_model = converter.convert()

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('Placeholder', input_details[0]['name'])
        self.assertEqual(np.float32, input_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
        self.assertEqual((0., 0.), input_details[0]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('add', output_details[0]['name'])
        self.assertEqual(np.float32, output_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
        self.assertEqual((0., 0.), output_details[0]['quantization'])
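
This test checks only tensor details; actually running the converted model is a natural extension. A short usage sketch (not part of the original test), using only the standard Interpreter API:

  input_data = np.ones([1, 16, 16, 3], dtype=np.float32)
  interpreter.set_tensor(input_details[0]['index'], input_data)
  interpreter.invoke()
  result = interpreter.get_tensor(output_details[0]['index'])
  # The graph computes x + x, so every element of `result` should be 2.0.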
Example No. 52
  def testFloat(self):
    input_data = constant_op.constant(1., shape=[1])
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
    concrete_func = root.f.get_concrete_function(input_data)

    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
    converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
    tflite_model = converter.convert()

    # Ensures the model contains TensorFlow ops.
    # TODO(nupurgarg): Check values once there is a Python delegate interface.
    interpreter = Interpreter(model_content=tflite_model)
    with self.assertRaises(RuntimeError) as error:
      interpreter.allocate_tensors()
    self.assertIn(
        'Regular TensorFlow ops are not supported by this interpreter. Make '
        'sure you invoke the Flex delegate before inference.',
        str(error.exception))
Example No. 53
  def testFlexMode(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensures the model contains TensorFlow ops.
    # TODO(nupurgarg): Check values once there is a Python delegate interface.
    interpreter = Interpreter(model_content=tflite_model)
    with self.assertRaises(RuntimeError) as error:
      interpreter.allocate_tensors()
    self.assertIn(
        'Regular TensorFlow ops are not supported by this interpreter. Make '
        'sure you invoke the Flex delegate before inference.',
        str(error.exception))
Example No. 54
  def testTFLiteGraphDef(self):
    # Tests an object detection graph that cannot be loaded in TensorFlow,
    # since it ends in the custom TFLite_Detection_PostProcess op.
    self._initObjectDetectionArgs()

    converter = lite.TFLiteConverter.from_frozen_graph(
        self._graph_def_file, self._input_arrays, self._output_arrays,
        self._input_shapes)
    converter.allow_custom_ops = True
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(4, len(output_details))
    self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    self.assertEqual('TFLite_Detection_PostProcess:1',
                     output_details[1]['name'])
    self.assertTrue(([1, 10] == output_details[1]['shape']).all())
    self.assertEqual('TFLite_Detection_PostProcess:2',
                     output_details[2]['name'])
    self.assertTrue(([1, 10] == output_details[2]['shape']).all())
    self.assertEqual('TFLite_Detection_PostProcess:3',
                     output_details[3]['name'])
    self.assertTrue(([1] == output_details[3]['shape']).all())
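
_initObjectDetectionArgs is not shown in this listing. Judging from the converter call and the assertions above, a plausible sketch follows; the graph-def path is a hypothetical placeholder, while the array names and shapes are taken directly from the assertions:

  def _initObjectDetectionArgs(self):
    # Hypothetical reconstruction; only the names and shapes asserted
    # above are grounded, the file path is a placeholder.
    self._graph_def_file = '/path/to/tflite_graph.pb'
    self._input_arrays = ['normalized_input_image_tensor']
    self._output_arrays = [
        'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
        'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
    ]
    self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}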
Example No. 55
  def testSequentialModel(self):
    """Test a Sequential tf.keras model with default inputs."""
    keras_file = self._getSequentialModel()

    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)
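
_getSequentialModel is likewise elided. A sketch consistent with the assertions above (input 'dense_input' of shape [1, 3], output 'time_distributed/Reshape_1' of shape [1, 3, 3]); the exact layer widths and the training step are assumptions:

  def _getSequentialModel(self):
    # Hypothetical reconstruction; layer sizes are assumptions chosen to
    # match the asserted input/output shapes.
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.RepeatVector(3))
    model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
    model.compile(
        loss=keras.losses.MSE,
        optimizer=keras.optimizers.RMSprop(),
        metrics=[keras.metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    fd, keras_file = tempfile.mkstemp('.h5')
    try:
      keras.models.save_model(model, keras_file)
    finally:
      os.close(fd)
    return keras_file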
Example No. 56
  def testFunctionalModelMultipleInputs(self):
    """Test a Functional tf.keras model with multiple inputs and outputs."""
    with session.Session().as_default():
      a = keras.layers.Input(shape=(3,), name='input_a')
      b = keras.layers.Input(shape=(3,), name='input_b')
      dense = keras.layers.Dense(4, name='dense')
      c = dense(a)
      d = dense(b)
      e = keras.layers.Dropout(0.5, name='dropout')(c)

      model = keras.models.Model([a, b], [d, e])
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.mae],
          loss_weights=[1., 0.5])

      input_a_np = np.random.random((10, 3))
      input_b_np = np.random.random((10, 3))
      output_d_np = np.random.random((10, 4))
      output_e_np = np.random.random((10, 4))
      model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])

      model.predict([input_a_np, input_b_np], batch_size=5)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    os.remove(keras_file)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('input_a', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('input_b', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(2, len(output_details))
    self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 4] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    self.assertEqual('dropout/Identity', output_details[1]['name'])
    self.assertEqual(np.float32, output_details[1]['dtype'])
    self.assertTrue(([1, 4] == output_details[1]['shape']).all())
    self.assertEqual((0., 0.), output_details[1]['quantization'])
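
As elsewhere in this listing, the test validates metadata only. A brief usage sketch (not part of the original test) that drives both inputs of the converted model and reads back both outputs:

  interpreter.set_tensor(input_details[0]['index'],
                         np.ones([1, 3], dtype=np.float32))
  interpreter.set_tensor(input_details[1]['index'],
                         np.ones([1, 3], dtype=np.float32))
  interpreter.invoke()
  dense_out = interpreter.get_tensor(output_details[0]['index'])    # shape [1, 4]
  dropout_out = interpreter.get_tensor(output_details[1]['index'])  # shape [1, 4]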