Esempio n. 1
0
  def testV1SimpleModel(self):
    """Convert a V1 SavedModel and verify its input/output tensor details."""
    with tf.Graph().as_default():
      saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])

      # Conversion must produce a non-empty flatbuffer.
      tflite_model = lite.TFLiteConverterV2.from_saved_model(
          saved_model_dir).convert()
      self.assertTrue(tflite_model)

      interp = Interpreter(model_content=tflite_model)
      interp.allocate_tensors()

      # Both inputs share the same dtype/shape/quantization contract.
      in_details = interp.get_input_details()
      self.assertLen(in_details, 2)
      for detail, prefix in zip(in_details, ('inputA', 'inputB')):
        self.assertStartsWith(detail['name'], prefix)
        self.assertEqual(np.float32, detail['dtype'])
        self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
        self.assertEqual((0., 0.), detail['quantization'])

      out_details = interp.get_output_details()
      self.assertLen(out_details, 1)
      self.assertStartsWith(out_details[0]['name'], 'add')
      self.assertEqual(np.float32, out_details[0]['dtype'])
      self.assertTrue(([1, 16, 16, 3] == out_details[0]['shape']).all())
      self.assertEqual((0., 0.), out_details[0]['quantization'])
Esempio n. 2
0
    def __init__(self):
        """Load labels and the TFLite detection model, then set up hardware helpers.

        Reads the label file at PATH_TO_LABELS, loads the TFLite model at
        PATH_TO_CKPT into an interpreter, and starts the head controller.
        """
        super().__init__()

        self.__alive = True

        # Detect whether the lightweight tflite_runtime package is installed.
        self.__pkg = importlib.util.find_spec("tflite_runtime")

        with open(PATH_TO_LABELS, "r") as f:
            self.__labels = [line.strip() for line in f]

        # Some label files start with a '???' placeholder row; drop it.
        # The emptiness guard avoids an IndexError on an empty label file.
        if self.__labels and self.__labels[0] == '???':
            del self.__labels[0]

        # Then load tensorflow lite model
        self.__interpreter = Interpreter(model_path=PATH_TO_CKPT)
        self.__interpreter.allocate_tensors()

        self.__head = HeadController()
        self.__head.start()
        self.__tracker = CentroidTracker()

        self.__detection_handler = None

        self.__led_controller = LedController()

        # NOTE(review): the handler is still None at this point, so the initial
        # state receives None — confirm a handler is injected later.
        self.__detection_state = PersonNotPresentState(self.__detection_handler)
  def testPostTrainingCalibrateAndQuantize(self):
    """Quantized model keeps float I/O and shrinks versus the float model."""
    func, calibration_gen = self._getCalibrationQuantizeModel()

    # Baseline: plain float conversion.
    float_tflite = lite.TFLiteConverterV2.from_concrete_functions(
        [func]).convert()
    self.assertTrue(float_tflite)

    # Calibrate-and-quantize conversion of the same function.
    quant_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
    quant_converter.optimizations = [lite.Optimize.DEFAULT]
    quant_converter.representative_dataset = calibration_gen
    quantized_tflite = quant_converter.convert()
    self.assertTrue(quantized_tflite)

    # Unless overridden, the model's input and output tensors stay float32.
    interp = Interpreter(model_content=quantized_tflite)
    interp.allocate_tensors()
    for details in (interp.get_input_details(), interp.get_output_details()):
      self.assertLen(details, 1)
      self.assertEqual(np.float32, details[0]['dtype'])

    # Weight quantization must reduce the serialized model size.
    self.assertLess(len(quantized_tflite), len(float_tflite))
Esempio n. 4
0
    def testL2LossOp(self, tf_quantization_mode):
        """L2Loss converts to a Flex op and still computes sum(x^2) / 2."""
        root = autotrackable.AutoTrackable()
        root.l2_loss_func = def_function.function(lambda x: nn_ops.l2_loss(x))  # pylint: disable=unnecessary-lambda
        sample = tf.range(4, dtype=tf.float32)
        concrete_func = root.l2_loss_func.get_concrete_function(sample)

        converter = lite.TFLiteConverterV2.from_concrete_functions(
            [concrete_func], root)
        converter._experimental_tf_quantization_mode = tf_quantization_mode
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)
        # The op is expected to be carried as a Flex (select TF) op.
        self.assertIn('FlexL2Loss',
                      tflite_test_util.get_ops_list(tflite_model))

        # Run the converted model end to end.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()
        feed = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        interp.set_tensor(interp.get_input_details()[0]['index'], feed)
        interp.invoke()

        # l2_loss([1,2,3,4]) = (1 + 4 + 9 + 16) / 2 = 15.
        result = interp.get_tensor(interp.get_output_details()[0]['index'])
        self.assertTrue((np.array([15.0], dtype=np.float32) == result).all())
Esempio n. 5
0
    def testString(self):
        """String tensors survive conversion with the MLIR converter enabled."""
        with ops.Graph().as_default():
            in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
            out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
            sess = session.Session()

        # Convert model and ensure model is not None.
        converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                      [out_tensor])
        converter.experimental_enable_mlir_converter = True
        tflite_model = converter.convert()

        # Inspect the converted model's tensor metadata.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()

        # Input and output differ only in expected name and shape.
        for details, name, shape in (
                (interp.get_input_details(), 'Placeholder', [4]),
                (interp.get_output_details(), 'Reshape', [2, 2])):
            self.assertEqual(1, len(details))
            self.assertEqual(name, details[0]['name'])
            self.assertEqual(np.string_, details[0]['dtype'])
            self.assertTrue((shape == details[0]['shape']).all())
Esempio n. 6
0
    def testGraphDefBasic(self):
        """Convert a raw GraphDef through convert_graphdef_with_arrays."""
        with ops.Graph().as_default():
            in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                              dtype=dtypes.float32,
                                              name="input")
            _ = in_tensor + in_tensor
            sess = session.Session()

        tflite_model = convert.convert_graphdef_with_arrays(
            sess.graph_def,
            input_arrays_with_shape=[("input", [1, 16, 16, 3])],
            output_arrays=["add"],
            control_output_arrays=None,
            inference_type=dtypes.float32,
            enable_mlir_converter=False)
        self.assertTrue(tflite_model)

        # Verify tensor metadata on both ends of the converted graph.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()

        for details, expected_name in ((interp.get_input_details(), "input"),
                                       (interp.get_output_details(), "add")):
            self.assertEqual(1, len(details))
            self.assertEqual(expected_name, details[0]["name"])
            self.assertEqual(np.float32, details[0]["dtype"])
            self.assertTrue(([1, 16, 16, 3] == details[0]["shape"]).all())
            self.assertEqual((0., 0.), details[0]["quantization"])
Esempio n. 7
0
    def testFlexWithDoubleOp(self):
        """A user-selected 'Double' TF op converts to Flex and doubles input."""
        # Build and save a graph containing the custom double op.
        saved_model_dir = os.path.join(self.get_temp_dir(), 'model2')
        with ops.Graph().as_default():
            with session.Session() as sess:
                in_tensor = array_ops.placeholder(shape=[1, 4],
                                                  dtype=dtypes.int32,
                                                  name='input')
                out_tensor = double_op.double(in_tensor)
                saved_model.simple_save(sess, saved_model_dir,
                                        {'x': in_tensor}, {'z': out_tensor})

        converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
        converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
        converter.target_spec.experimental_select_user_tf_ops = ['Double']
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)
        self.assertIn('FlexDouble',
                      tflite_test_util.get_ops_list(tflite_model))

        # Execute the converted model; each element should be doubled.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()
        feed = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.int32)
        interp.set_tensor(interp.get_input_details()[0]['index'], feed)
        interp.invoke()

        result = interp.get_tensor(interp.get_output_details()[0]['index'])
        expected = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.int32)
        self.assertTrue((expected == result).all())
  def testFreezeGraph(self):
    """A graph with a variable converts after freezing via the session."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    var = variable_scope.get_variable(
        'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + var
    sess = session.Session()
    sess.run(_global_variables_initializer())

    # Convert model and ensure model is not None.
    tflite_model = lite.TFLiteConverter.from_session(
        sess, [in_tensor], [out_tensor]).convert()
    self.assertTrue(tflite_model)

    # Inspect tensor details of the converted model.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    for details, expected_name in ((interp.get_input_details(), 'Placeholder'),
                                   (interp.get_output_details(), 'add')):
      self.assertEqual(1, len(details))
      self.assertEqual(expected_name, details[0]['name'])
      self.assertEqual(np.float32, details[0]['dtype'])
      self.assertTrue(([1, 16, 16, 3] == details[0]['shape']).all())
      self.assertEqual((0., 0.), details[0]['quantization'])
  def testDumpGraphviz(self):
    """Graphviz dumping writes files; the video flag writes more of them."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # First conversion: plain graphviz dump.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # The converted model must still allocate cleanly.
    Interpreter(model_content=tflite_model).allocate_tensors()

    num_items_graphviz = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz)

    # Second conversion: additionally enable the graphviz "video" flag.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    converter.dump_graphviz_video = True
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # The video flag should result in strictly more files in the dir.
    num_items_graphviz_video = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz_video > num_items_graphviz)
Esempio n. 10
0
    def testMatMulCalibrateAndQuantize(self):
        """Calibration keeps float I/O and shrinks the MatMul model."""
        concrete_func, calibration_gen = self._getQuantizedModel()

        # Float baseline conversion.
        float_converter = lite.TFLiteConverterV2.from_concrete_functions(
            [concrete_func])
        float_converter.experimental_new_converter = True
        float_tflite_model = float_converter.convert()

        # Calibrate-and-quantize conversion.
        quant_converter = lite.TFLiteConverterV2.from_concrete_functions(
            [concrete_func])
        quant_converter.optimizations = [lite.Optimize.DEFAULT]
        quant_converter.representative_dataset = calibration_gen
        quant_converter.experimental_new_converter = True
        quantized_tflite_model = quant_converter.convert()

        # The default input and output types should be float.
        interp = Interpreter(model_content=quantized_tflite_model)
        interp.allocate_tensors()
        in_details = interp.get_input_details()
        self.assertLen(in_details, 1)
        self.assertEqual(np.float32, in_details[0]['dtype'])
        # The dynamic batch dimension survives in the shape signature.
        self.assertTrue((in_details[0]['shape_signature'] == [-1, 33]).all())

        # Ensure that the quantized weights tflite model is smaller.
        self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
  def testFunctionalSequentialModel(self):
    """Test a Functional tf.keras model containing a Sequential model.

    Builds and trains a small Sequential model wrapped in a functional
    Model, saves it to HDF5, converts it to TFLite, checks the converted
    tensor metadata, and verifies TFLite inference matches Keras inference.
    """
    with session.Session().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model = keras.models.Model(model.input, model.output)

      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      # One forward pass before saving; the original called predict twice,
      # which was redundant.
      model.predict(x)

      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model against the original Keras model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    # mkstemp files are not auto-cleaned, so remove explicitly.
    os.remove(keras_file)
Esempio n. 12
0
    def testDeprecatedFlags(self):
        """Deprecated `target_ops` populates `target_spec.supported_ops`."""
        with ops.Graph().as_default():
            in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                              dtype=dtypes.float32)
            out_tensor = in_tensor + in_tensor
            sess = session.Session()

        # Convert model and ensure model is not None.
        converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                      [out_tensor])
        converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])

        # The deprecated alias and the new location must report the same set.
        expected_ops = set([lite.OpsSet.SELECT_TF_OPS])
        self.assertEqual(expected_ops, converter.target_ops)
        self.assertEqual(expected_ops, converter.target_spec.supported_ops)

        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Ensures the model contains TensorFlow ops.
        # TODO(nupurgarg): Check values once there is a Python delegate interface.
        interp = Interpreter(model_content=tflite_model)
        with self.assertRaises(RuntimeError) as ctx:
            interp.allocate_tensors()
        self.assertIn(
            'Regular TensorFlow ops are not supported by this interpreter.',
            str(ctx.exception))
Esempio n. 13
0
    def testString(self):
        """String tensors are preserved through the default converter path."""
        in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
        out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
        sess = session.Session()

        # Convert; the helper returns None when conversion is unsupported.
        converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                      [out_tensor])
        tflite_model = mlir_convert_and_check_for_unsupported(self, converter)
        if tflite_model is None:
            return

        # Check values from converted model.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()

        # Input and output differ only in expected name and shape.
        for details, name, shape in (
                (interp.get_input_details(), 'Placeholder', [4]),
                (interp.get_output_details(), 'Reshape', [2, 2])):
            self.assertEqual(1, len(details))
            self.assertEqual(name, details[0]['name'])
            self.assertEqual(np.string_, details[0]['dtype'])
            self.assertTrue((shape == details[0]['shape']).all())
Esempio n. 14
0
    def testConcreteFunc(self):
        """Select-TF-ops models fail allocation without the Flex delegate."""
        input_data = constant_op.constant(1., shape=[1])
        root = tracking.AutoTrackable()
        root.v1 = variables.Variable(3.)
        root.v2 = variables.Variable(2.)
        root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
        concrete_func = root.f.get_concrete_function(input_data)

        # Convert with TF-select ops enabled.
        converter = lite.TFLiteConverterV2.from_concrete_functions(
            [concrete_func])
        converter.experimental_enable_mlir_converter = True
        converter.target_spec.supported_ops = {lite.OpsSet.SELECT_TF_OPS}

        tflite_model = mlir_convert_and_check_for_unsupported(self, converter)
        if tflite_model is None:
            return

        # Ensures the model contains TensorFlow ops.
        # TODO(nupurgarg): Check values once there is a Python delegate interface.
        interp = Interpreter(model_content=tflite_model)
        with self.assertRaises(RuntimeError) as ctx:
            interp.allocate_tensors()
        self.assertIn(
            'Regular TensorFlow ops are not supported by this interpreter. Make '
            'sure you invoke the Flex delegate before inference.',
            str(ctx.exception))
Esempio n. 15
0
    def _feed_tensors(self, dataset_gen, resize_input):
        """Feed tensors to the calibrator.

        Args:
          dataset_gen: Callable returning an iterable of samples. Each sample
            is either a dict mapping signature input names to values, or a
            list of values ordered like the graph's input tensors.
          resize_input: If True, the calibrator's inputs are resized to the
            first sample's shapes during Prepare().

        Raises:
          ValueError: If a sample is neither a dict nor a list.
        """
        initialized = False
        for sample in dataset_gen():
            if isinstance(sample, dict):
                # Convert signature based inputs to the tensor index based data.
                # The interpreter is created lazily once and reused across
                # samples to resolve signature names to tensor positions.
                if not hasattr(self, "_interpreter"):
                    self._interpreter = Interpreter(
                        model_content=self._model_content)
                input_array = [None] * len(sample)
                signature_runner = self._interpreter.get_signature_runner()
                for input_name, value in sample.items():
                    # Place each value at the position the signature maps its
                    # name to, so input_array follows graph input order.
                    tensor_index = signature_runner._inputs[input_name]  # pylint: disable=protected-access
                    input_array[tensor_index] = value
            elif isinstance(sample, list):
                input_array = sample
            else:
                raise ValueError(
                    "You need to provide either a dictionary with input "
                    "names or values and an array with input values in "
                    "the order of input tensors of the graph in the "
                    "representative_dataset function. Unsupported value "
                    "from dataset: {}.".format(sample))

            # Prepare() must run exactly once, before the first FeedTensor().
            if not initialized:
                initialized = True
                if resize_input:
                    self._calibrator.Prepare(
                        [list(s.shape) for s in input_array])
                else:
                    self._calibrator.Prepare()
            self._calibrator.FeedTensor(input_array)
  def testDefaultRangesStats(self):
    """Quantized conversion with default ranges yields uint8 tensors."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert with quantized inference settings.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
    converter.default_ranges_stats = (0, 6)  # min, max
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    in_details = interp.get_input_details()
    self.assertEqual(1, len(in_details))
    self.assertEqual('Placeholder', in_details[0]['name'])
    self.assertEqual(np.uint8, in_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == in_details[0]['shape']).all())
    self.assertEqual((1., 0.), in_details[0]['quantization'])

    out_details = interp.get_output_details()
    self.assertEqual(1, len(out_details))
    self.assertEqual('add', out_details[0]['name'])
    self.assertEqual(np.uint8, out_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == out_details[0]['shape']).all())
    # The output must carry a positive quantization scale.
    self.assertTrue(out_details[0]['quantization'][0] > 0)  # scale
Esempio n. 17
0
  def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):
    """Evaluates the model on the `input_data`.

    Args:
      tflite_model: TensorFlow Lite model.
      input_data: List of EagerTensor const ops containing the input data for
        each input tensor.
      input_shapes: List of tuples representing the `shape_signature` and the
        new shape of each input tensor that has unknown dimensions.

    Returns:
      [np.ndarray]
    """
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    if input_shapes:
      for idx, (shape_signature, final_shape) in enumerate(input_shapes):
        # The model's recorded shape signature must match before resizing.
        self.assertTrue(
            (input_details[idx]['shape_signature'] == shape_signature).all())
        index = input_details[idx]['index']
        interpreter.resize_tensor_input(index, final_shape, strict=True)
    # Allocation must follow resizing so buffers use the final shapes.
    interpreter.allocate_tensors()

    output_details = interpreter.get_output_details()
    # Re-fetch input details after allocation: resizing may have changed them.
    input_details = interpreter.get_input_details()

    for input_tensor, tensor_data in zip(input_details, input_data):
      interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
    interpreter.invoke()
    return [
        interpreter.get_tensor(details['index']) for details in output_details
    ]
  def testFloatWithShapesArray(self):
    """Shapes passed via `input_shapes` show up in the converted model."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Serialize the graph to a binary .pb file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert, supplying the input shape explicitly.
    converter = lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, ['Placeholder'], ['add'],
        input_shapes={'Placeholder': [1, 16, 16, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # The converted model must report the requested input shape.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    in_details = interp.get_input_details()
    self.assertEqual(1, len(in_details))
    self.assertTrue(([1, 16, 16, 3] == in_details[0]['shape']).all())
Esempio n. 19
0
    def testFloat(self, enable_mlir):
        """A variable-product function runs correctly with TF-select ops."""
        input_data = constant_op.constant(1., shape=[1])
        root = autotrackable.AutoTrackable()
        root.v1 = variables.Variable(3.)
        root.v2 = variables.Variable(2.)
        root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
        concrete_func = root.f.get_concrete_function(input_data)

        # Convert model.
        converter = lite.TFLiteConverterV2.from_concrete_functions(
            [concrete_func], root)
        converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
        converter.experimental_new_converter = enable_mlir
        tflite_model = converter.convert()

        # Run inference: f(4) = 3 * 2 * 4 = 24.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()
        feed = np.array([4.0], dtype=np.float32)
        interp.set_tensor(interp.get_input_details()[0]['index'], feed)
        interp.invoke()

        result = interp.get_tensor(interp.get_output_details()[0]['index'])
        self.assertTrue((np.array([24.0], dtype=np.float32) == result).all())
  def testPbtxt(self):
    """A text-format (.pbtxt) frozen graph converts successfully."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Serialize the graph as text protobuf.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
    write_graph(sess.graph_def, '', graph_def_file, True)
    sess.close()

    # Convert model and ensure model is not None.
    tflite_model = lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, ['Placeholder'], ['add']).convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    for details, expected_name in ((interp.get_input_details(), 'Placeholder'),
                                   (interp.get_output_details(), 'add')):
      self.assertEqual(1, len(details))
      self.assertEqual(expected_name, details[0]['name'])
      self.assertEqual(np.float32, details[0]['dtype'])
      self.assertTrue(([1, 16, 16, 3] == details[0]['shape']).all())
      self.assertEqual((0., 0.), details[0]['quantization'])
Esempio n. 21
0
    def testAddOp(self, tf_quantization_mode):
        """x + x stays a builtin ADD in LEGACY_INTEGER mode, Flex otherwise."""
        root = autotrackable.AutoTrackable()
        root.add_func = def_function.function(lambda x: x + x)
        sample = tf.reshape(tf.range(4, dtype=tf.float32), [1, 4])
        concrete_func = root.add_func.get_concrete_function(sample)

        # Convert model and check if the op is not flex.
        converter = lite.TFLiteConverterV2.from_concrete_functions(
            [concrete_func], root)
        converter._experimental_tf_quantization_mode = tf_quantization_mode
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)
        expected_op = ('ADD' if tf_quantization_mode == 'LEGACY_INTEGER'
                       else 'FlexAddV2')
        self.assertIn(expected_op, tflite_test_util.get_ops_list(tflite_model))

        # Check the model works.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()
        feed = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
        interp.set_tensor(interp.get_input_details()[0]['index'], feed)
        interp.invoke()

        result = interp.get_tensor(interp.get_output_details()[0]['index'])
        expected = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)
        self.assertTrue((expected == result).all())
  def testSimpleModel(self):
    """Convert a SavedModel and verify its two inputs and one output."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Convert model and ensure model is not None.
    tflite_model = lite.TFLiteConverter.from_saved_model(
        saved_model_dir).convert()
    self.assertTrue(tflite_model)

    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    # Both inputs share dtype, shape and (empty) quantization parameters.
    in_details = interp.get_input_details()
    self.assertEqual(2, len(in_details))
    for detail, expected_name in zip(in_details, ('inputA', 'inputB')):
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    out_details = interp.get_output_details()
    self.assertEqual(1, len(out_details))
    self.assertEqual('add', out_details[0]['name'])
    self.assertEqual(np.float32, out_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == out_details[0]['shape']).all())
    self.assertEqual((0., 0.), out_details[0]['quantization'])
Esempio n. 23
0
    def testDeprecatedFlags(self):
        """Deprecated `target_ops` routes through `target_spec` and still runs."""
        with ops.Graph().as_default():
            in_tensor = array_ops.placeholder(shape=[1, 4],
                                              dtype=dtypes.float32)
            out_tensor = in_tensor + in_tensor
            sess = session.Session()

        # Convert model and ensure model is not None.
        converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                      [out_tensor])
        converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])

        # The deprecated alias and the new location must report the same set.
        expected_ops = set([lite.OpsSet.SELECT_TF_OPS])
        self.assertEqual(expected_ops, converter.target_ops)
        self.assertEqual(expected_ops, converter.target_spec.supported_ops)

        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Check the model works with TensorFlow ops.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()
        feed = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
        interp.set_tensor(interp.get_input_details()[0]['index'], feed)
        interp.invoke()

        result = interp.get_tensor(interp.get_output_details()[0]['index'])
        expected = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)
        self.assertTrue((expected == result).all())
  def testNoneBatchSize(self):
    """Test a SavedModel, with None in input tensor's shape.

    The converter is expected to resolve the unknown (None) batch dimension
    to 1 in the converted model's input shapes.
    """
    saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])

    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 2)
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    # The None batch dimension is resolved to 1.
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Esempio n. 25
0
    def testFloat(self):
        """Test conversion of a simple float add graph with the MLIR converter enabled."""
        with ops.Graph().as_default():
            in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                              dtype=dtypes.float32)
            out_tensor = in_tensor + in_tensor
            sess = session.Session()

        # Convert model and ensure model is not None.
        converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                      [out_tensor])
        converter.experimental_enable_mlir_converter = True
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertLen(input_details, 1)
        self.assertEqual('Placeholder', input_details[0]['name'])
        self.assertEqual(np.float32, input_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
        self.assertEqual((0., 0.), input_details[0]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertLen(output_details, 1)
        self.assertEqual('add', output_details[0]['name'])
        self.assertEqual(np.float32, output_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
        self.assertEqual((0., 0.), output_details[0]['quantization'])
  def testOrderInputArrays(self):
    """Test a SavedModel ordering of input arrays.

    Passing `input_arrays` in reverse order should not affect the converted
    model: input details still come back as inputA, inputB.
    """
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir, input_arrays=['inputB', 'inputA'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 2)
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Esempio n. 27
0
  def testGraphDefBasic(self):
    """Test converting a frozen GraphDef directly via toco_convert_graph_def."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input")
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Convert straight from the GraphDef, naming inputs/outputs explicitly.
    tflite_model = convert.toco_convert_graph_def(
        sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
        inference_type=lite_constants.FLOAT)
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual("input", input_details[0]["name"])
    self.assertEqual(np.float32, input_details[0]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
    self.assertEqual((0., 0.), input_details[0]["quantization"])

    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual("add", output_details[0]["name"])
    self.assertEqual(np.float32, output_details[0]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
    self.assertEqual((0., 0.), output_details[0]["quantization"])
  def testSequentialModelInputShape(self):
    """Test a Sequential tf.keras model testing input shapes argument."""
    keras_file = self._getSequentialModel()

    # Passing in shape of invalid input array raises error.
    with self.assertRaises(ValueError) as error:
      converter = lite.TFLiteConverter.from_keras_model_file(
          keras_file, input_shapes={'invalid-input': [2, 3]})
    self.assertEqual(
        "Invalid tensor 'invalid-input' found in tensor shapes map.",
        str(error.exception))

    # Passing in shape of valid input array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'dense_input': [2, 3]})
    tflite_model = converter.convert()
    # Clean up the temporary Keras file once it is no longer needed.
    os.remove(keras_file)
    self.assertTrue(tflite_model)

    # Check input shape from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertTrue(([2, 3] == input_details[0]['shape']).all())
Esempio n. 29
0
  def testCalibrateAndQuantizeBuiltinInt8(self):
    """Calibrate and quantize to builtin-int8 ops via the target spec."""
    func, calibration_gen = self._getCalibrationQuantizeModel()

    # Baseline: a plain float conversion of the same concrete function.
    converter_float = lite.TFLiteConverterV2.from_concrete_functions([func])
    model_float = converter_float.convert()
    self.assertTrue(model_float)

    # Convert model by specifying target spec (instead of optimizations), since
    # when targeting an integer only backend, quantization is mandatory.
    converter_quant = lite.TFLiteConverterV2.from_concrete_functions([func])
    converter_quant.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    converter_quant.representative_dataset = calibration_gen
    model_quant = converter_quant.convert()
    self.assertTrue(model_quant)

    # The default input and output types should be float.
    interpreter = Interpreter(model_content=model_quant)
    interpreter.allocate_tensors()
    for details in (interpreter.get_input_details(),
                    interpreter.get_output_details()):
      self.assertLen(details, 1)
      self.assertEqual(np.float32, details[0]['dtype'])

    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(model_quant), len(model_float))
Esempio n. 30
0
  def testIntermediateInputArray(self):
    """Convert a model from an intermediate input array.

    The converter is fed `in_tensor_final` (itself the result of an add) as
    the input, so the converted model's input is named 'add' and its output
    'add_1'.
    """
    in_tensor_init = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    in_tensor_final = in_tensor_init + in_tensor_init
    out_tensor = in_tensor_final + in_tensor_final
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual('add', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual('add_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])