Exemple #1
1
  def testPbtxt(self):
    """Convert a graph serialized to disk as text protobuf (pbtxt)."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = inp + inp
    sess = session.Session()

    # Serialize the graph as a text protobuf before converting.
    pbtxt_path = os.path.join(self.get_temp_dir(), 'model.pbtxt')
    write_graph(sess.graph_def, '', pbtxt_path, True)
    sess.close()

    # A non-empty flatbuffer means the conversion succeeded.
    converter = lite.TFLiteConverter.from_frozen_graph(pbtxt_path,
                                                       ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensor metadata round-tripped.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name):
      # Unquantized float32 tensor with the original placeholder shape.
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    check_float_detail(input_details[0], 'Placeholder')

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], 'add')
Exemple #2
0
  def testSimpleModel(self):
    """Test a SavedModel."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # A non-empty flatbuffer means the conversion succeeded.
    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name):
      # Unquantized float32 tensor with the SavedModel's input shape.
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    input_details = interp.get_input_details()
    self.assertEqual(2, len(input_details))
    check_float_detail(input_details[0], 'inputA')
    check_float_detail(input_details[1], 'inputB')

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], 'add')
Exemple #3
0
  def testNoneBatchSize(self):
    """Test a SavedModel, with None in input tensor's shape."""
    saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])

    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer; the None batch dimension is expected to have
    # been resolved to 1 in the converted model.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name):
      # Unquantized float32 tensor; batch dim resolved to 1.
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    input_details = interp.get_input_details()
    self.assertEqual(2, len(input_details))
    check_float_detail(input_details[0], 'inputA')
    check_float_detail(input_details[1], 'inputB')

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], 'add')
Exemple #4
0
  def testOrderInputArrays(self):
    """Test a SavedModel ordering of input arrays."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Pass the input arrays in reverse order; the converted model should
    # still expose them in their canonical order.
    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir, input_arrays=['inputB', 'inputA'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensor metadata.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name):
      # Unquantized float32 tensor with the SavedModel's input shape.
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    input_details = interp.get_input_details()
    self.assertEqual(2, len(input_details))
    check_float_detail(input_details[0], 'inputA')
    check_float_detail(input_details[1], 'inputB')

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], 'add')
Exemple #5
0
  def testDefaultRangesStats(self):
    """Quantized conversion using default_ranges_stats for missing ranges."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out = inp + inp
    sess = session.Session()

    # Request a fully quantized model; default_ranges_stats supplies a
    # (min, max) range for tensors that have no recorded range.
    converter = lite.TFLiteConverter.from_session(sess, [inp],
                                                  [out])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
    converter.default_ranges_stats = (0, 6)  # min, max
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensors were quantized to uint8.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    # With mean 0 and std_dev 1 the input quantization is (scale=1, zp=0).
    self.assertEqual((1., 0.), input_details[0]['quantization'])

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale
Exemple #6
0
def main():
    """Run TFLite inference over every frame of a video ROI and time it."""
    args = parse_args()

    # Compute the coordinates of the region of interest as (x1, y1, x2, y2).
    roi_coords = (
        args['roi_x'],
        args['roi_y'],
        args['roi_x'] + args['roi_width'],
        args['roi_y'] + args['roi_height']
    )

    # Load the model.
    interpreter = Interpreter(args['model'])
    interpreter.allocate_tensors()

    input_tensor_index = interpreter.get_input_details()[0]['index']
    output_tensor_index = interpreter.get_output_details()[0]['index']

    # Open the video and process each frame. Count frames explicitly:
    # previously the loop variable `index` was read after the loop, which
    # raised NameError on an empty video and mis-stated the count if the
    # frame index was 0-based.
    num_frames = 0
    start_time = time.time()
    for _index, curr_roi, _ in iter_frames(args['video'], roi_coords):
        # ROI is assumed to be a single-channel (grayscale) image —
        # TODO confirm against iter_frames.
        X = cv2.cvtColor(curr_roi, cv2.COLOR_GRAY2BGR)
        X = np.reshape(X, (1, *X.shape))
        X = keras.applications.mobilenet_v2.preprocess_input(X)

        interpreter.set_tensor(input_tensor_index, X)
        interpreter.invoke()
        # NOTE(review): the output tensor (output_tensor_index) is never
        # read — this loop only benchmarks inference time.
        num_frames += 1

    # Print average inference time; guard against division by zero when the
    # video produced no frames.
    elapsed_time = time.time() - start_time
    if num_frames == 0:
        print('No frames were processed')
        return
    print(f'Processed {num_frames} frames')
    print(f'Time taken: {elapsed_time:0.2f} s')
    print(f'Time per frame: {elapsed_time / num_frames:0.2f} s')
    print(f'Average FPS: {num_frames / elapsed_time:0.1f}')
Exemple #7
0
  def testSequentialModelInputShape(self):
    """Test a Sequential tf.keras model testing input shapes argument."""
    keras_file = self._getSequentialModel()

    # Shapes given for unknown input arrays have no impact as long as every
    # real input array already has a shape.
    tflite_model = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'invalid-input': [2, 3]}).convert()
    self.assertTrue(tflite_model)

    # Overriding the shape of a real input array takes effect.
    tflite_model = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'dense_input': [2, 3]}).convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and confirm the overridden input shape.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertTrue(([2, 3] == input_details[0]['shape']).all())
Exemple #8
0
  def testFloat(self):
    """Round-trip a trivial float add graph through the converter."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out = inp + inp
    sess = session.Session()

    # Convert straight from the live session; a non-empty flatbuffer means
    # the conversion succeeded.
    converter = lite.TFLiteConverter.from_session(sess, [inp], [out])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensor metadata round-tripped.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name):
      # Unquantized float32 tensor with the original placeholder shape.
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    check_float_detail(input_details[0], 'Placeholder')

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], 'add')
Exemple #9
0
  def testFunctionalSequentialModel(self):
    """Test a Functional tf.keras model containing a Sequential model."""
    with session.Session().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      # Wrap the Sequential model in a Functional Model.
      model = keras.models.Model(model.input, model.output)

      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      # Run one forward pass so the model is fully built before saving.
      # (A redundant duplicated predict() call was removed here.)
      model.predict(x)

      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model against the original Keras model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)
Exemple #10
0
    def testDumpGraphviz(self):
        """Graphviz dump dir gets files; the video flag produces more."""
        inp = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32)
        out = inp + inp
        sess = session.Session()

        # First conversion: request graphviz dumps into a temp directory.
        converter = lite.TocoConverter.from_session(sess, [inp],
                                                    [out])
        graphviz_dir = self.get_temp_dir()
        converter.dump_graphviz_dir = graphviz_dir
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # The converted model must load, and the dump dir must be non-empty.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()

        num_items_graphviz = len(os.listdir(graphviz_dir))
        self.assertTrue(num_items_graphviz)

        # Second conversion with the video flag enabled.
        converter = lite.TocoConverter.from_session(sess, [inp],
                                                    [out])
        graphviz_dir = self.get_temp_dir()
        converter.dump_graphviz_dir = graphviz_dir
        converter.dump_graphviz_video = True
        self.assertTrue(converter.convert())

        # The video flag dumps after every transformation, so the directory
        # should now hold strictly more files than before.
        num_items_graphviz_video = len(os.listdir(graphviz_dir))
        self.assertTrue(num_items_graphviz_video > num_items_graphviz)
Exemple #11
0
  def testDumpGraphviz(self):
    """Graphviz dump dir gets files; the video flag produces more."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out = inp + inp
    sess = session.Session()

    # First conversion: request graphviz dumps into a temp directory.
    converter = lite.TFLiteConverter.from_session(sess, [inp],
                                                  [out])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # The converted model must load, and the dump dir must be non-empty.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    num_items_graphviz = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz)

    # Second conversion with the video flag enabled.
    converter = lite.TFLiteConverter.from_session(sess, [inp],
                                                  [out])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    converter.dump_graphviz_video = True
    self.assertTrue(converter.convert())

    # The video flag dumps after every transformation, so the directory
    # should now hold strictly more files than before.
    num_items_graphviz_video = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz_video > num_items_graphviz)
Exemple #12
0
  def testQuantization(self):
    """Quantized uint8 conversion of a fake-quantized add graph."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='input')
    # fake_quant supplies the (min, max) range the quantizer needs.
    out = array_ops.fake_quant_with_min_max_args(
        inp + inp, min=0., max=1., name='output')
    sess = session.Session()

    # Request a fully quantized model.
    converter = lite.TocoConverter.from_session(sess, [inp], [out])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = [(0., 1.)]  # mean, std_dev
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensors were quantized.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('input', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.),
                     input_details[0]['quantization'])  # scale, zero_point

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('output', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale
Exemple #13
0
  def testFreezeGraph(self):
    """Conversion from a session freezes variables into constants."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    # A variable forces the converter to freeze the graph first.
    var = variable_scope.get_variable(
        'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out = inp + var
    sess = session.Session()
    sess.run(_global_variables_initializer())

    # A non-empty flatbuffer means the conversion succeeded.
    converter = lite.TocoConverter.from_session(sess, [inp], [out])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensor metadata round-tripped.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name):
      # Unquantized float32 tensor with the original placeholder shape.
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue(([1, 16, 16, 3] == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    check_float_detail(input_details[0], 'Placeholder')

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], 'add')
Exemple #14
0
    def testFloatWithShapesArray(self):
        """Frozen-graph conversion honoring an explicit input_shapes map."""
        inp = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32)
        _ = inp + inp
        sess = session.Session()

        # Serialize the graph as a binary protobuf before converting.
        graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
        write_graph(sess.graph_def, '', graph_def_file, False)
        sess.close()

        # A non-empty flatbuffer means the conversion succeeded.
        converter = lite.TocoConverter.from_frozen_graph(
            graph_def_file, ['Placeholder'], ['add'],
            input_shapes={'Placeholder': [1, 16, 16, 3]})
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Reload the flatbuffer and confirm the requested input shape.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()

        input_details = interp.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
Exemple #15
0
    def testSequentialModelInputShape(self):
        """Test a Sequential tf.keras model testing input shapes argument."""
        keras_file = self._getSequentialModel()

        # Shapes given for unknown input arrays have no impact as long as
        # every real input array already has a shape.
        tflite_model = lite.TocoConverter.from_keras_model_file(
            keras_file, input_shapes={'invalid-input': [2, 3]}).convert()
        self.assertTrue(tflite_model)

        # Overriding the shape of a real input array takes effect.
        tflite_model = lite.TocoConverter.from_keras_model_file(
            keras_file, input_shapes={'dense_input': [2, 3]}).convert()
        os.remove(keras_file)
        self.assertTrue(tflite_model)

        # Reload the flatbuffer and confirm the overridden input shape.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()

        input_details = interp.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('dense_input', input_details[0]['name'])
        self.assertTrue(([2, 3] == input_details[0]['shape']).all())
Exemple #16
0
  def testInferenceInputType(self):
    """Conversion of a uint8 graph honors inference_input_type."""
    in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3], dtype=dtypes.uint8)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
    converter.inference_input_type = lite_constants.QUANTIZED_UINT8
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    # Bug fix: this previously re-checked input_details[0]['quantization'];
    # the OUTPUT tensor's quantization is what this assertion is for.
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Exemple #17
0
  def testGraphDefBasic(self):
    """Float conversion via the low-level toco_convert_graph_def API."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input")
    _ = inp + inp
    sess = session.Session()

    # Convert directly from the GraphDef with explicit input name/shape.
    tflite_model = convert.toco_convert_graph_def(
        sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
        inference_type=lite_constants.FLOAT)
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensor metadata round-tripped.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name):
      # Unquantized float32 tensor with the original placeholder shape.
      self.assertEqual(expected_name, detail["name"])
      self.assertEqual(np.float32, detail["dtype"])
      self.assertTrue(([1, 16, 16, 3] == detail["shape"]).all())
      self.assertEqual((0., 0.), detail["quantization"])

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    check_float_detail(input_details[0], "input")

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], "add")
Exemple #18
0
  def testFloatWithShapesArray(self):
    """Frozen-graph conversion honoring an explicit input_shapes map."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = inp + inp
    sess = session.Session()

    # Serialize the graph as a binary protobuf before converting.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # A non-empty flatbuffer means the conversion succeeded.
    converter = lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, ['Placeholder'], ['add'],
        input_shapes={'Placeholder': [1, 16, 16, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and confirm the requested input shape.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
Exemple #19
0
 def __init__(self, model_filename, labels_filename, box_priors_filename):
   """Load a TFLite model together with its label and box-prior files."""
   interp = Interpreter(model_filename)
   # Cache tensor details once so detect() doesn't re-query per frame.
   self.inputs = interp.get_input_details()
   self.outputs = interp.get_output_details()
   interp.allocate_tensors()
   self.interp = interp
   self.labels = ParseLabels(labels_filename)
   self.box_priors = ParseBoxPriors(box_priors_filename)
Exemple #20
0
class ObjectDetector:
  """TFLite object detector that decodes SSD-style box-prior outputs."""

  def __init__(self, model_filename, labels_filename, box_priors_filename):
    """Load the TFLite model plus its label and box-prior side files."""
    self.interp = Interpreter(model_filename)
    # Tensor details are fetched before allocate_tensors(); the indices
    # stay valid, tensor data only exists after allocation.
    self.inputs = self.interp.get_input_details()
    self.outputs = self.interp.get_output_details()
    self.interp.allocate_tensors()
    self.labels = ParseLabels(labels_filename)
    self.box_priors = ParseBoxPriors(box_priors_filename)

  def detect(self, image):
    """Run detection on `image` and return boxes/classes/scores.

    Args:
      image: batched array; only image[0] is used. NOTE(review): assumed to
        be an HWC color image resizable to 300x300 — confirm with callers.

    Returns:
      Dict with 'num_detections', 'detection_classes', 'detection_boxes'
      (each box as [xmin, ymin, xmax, ymax]) and 'detection_scores',
      sorted by ascending score.
    """
    # Resize to the model's 300x300 input and map pixel values from
    # [0, 255] into [-1, 1) via x/128 - 1.
    resized_image = cv2.resize(image[0,:,:,:], (300, 300))
    expanded_image = np.expand_dims(resized_image, axis=0)
    normalized_image = expanded_image.astype(np.float32) / 128. - 1.

    self.interp.set_tensor(self.inputs[0]["index"], normalized_image)
    self.interp.invoke()
    # Output 0: encoded box locations; output 1: per-class scores.
    output_locations = self.interp.get_tensor(self.outputs[0]["index"])
    output_classes = self.interp.get_tensor(self.outputs[1]["index"])
    num_results = output_classes.shape[1]
    print("Num results:", num_results)
    boxes = []
    for i in range(num_results):
      # Decode center/size offsets against the anchor (box prior); rows of
      # box_priors appear to be [ycenter, xcenter, h, w] — TODO confirm
      # against ParseBoxPriors.
      ycenter = (
          output_locations[0, i, 0] / Y_SCALE * self.box_priors[2, i] +
          self.box_priors[0, i])
      xcenter = (
          output_locations[0, i, 1] / X_SCALE * self.box_priors[3, i] +
          self.box_priors[1, i])
      h = math.exp(output_locations[0, i, 2] / H_SCALE) * self.box_priors[2, i]
      w = math.exp(output_locations[0, i, 3] / W_SCALE) * self.box_priors[3, i]
      ymin = ycenter - h / 2
      xmin = xcenter - w / 2
      ymax = ycenter + h / 2
      xmax = xcenter + w / 2

      # Overwrite the encoded values in place with decoded corners; later
      # iterations only read row i, so this is safe.
      output_locations[0, i, 0] = ymin
      output_locations[0, i, 1] = xmin
      output_locations[0, i, 2] = ymax
      output_locations[0, i, 3] = xmax

      # Skip class 0 (background) when picking the top class.
      exp_scores = ExpIt(output_classes[0, i])
      top_class_score_index = np.argmax(exp_scores[1:]) + 1
      if exp_scores[top_class_score_index] > TOP_CLASS_SCORE_THRESHOLD:
        # Reorder corners to [xmin, ymin, xmax, ymax].
        rectf = output_locations[0, i, [1, 0, 3, 2]]
        # Not actually a probability?
        value = exp_scores[top_class_score_index]
        boxes.append((value, rectf, top_class_score_index))

    # NOTE(review): ascending sort puts the highest-scoring box LAST —
    # confirm consumers expect that order.
    boxes.sort(key=lambda x: x[0])

    output_dict = {
        'num_detections': len(boxes),
        'detection_classes': [box[2] for box in boxes],
        'detection_boxes': [box[1] for box in boxes],
        'detection_scores': [box[0] for box in boxes],
        }
    return output_dict
Exemple #21
0
  def testFunctionalModel(self):
    """Test a Functional tf.keras model with default inputs."""
    # Build and briefly train a tiny Dense->Dense functional model.
    inputs = keras.layers.Input(shape=(3,), name='input')
    hidden = keras.layers.Dense(2)(inputs)
    output = keras.layers.Dense(3)(hidden)

    model = keras.models.Model(inputs, output)
    model.compile(
        loss=keras.losses.MSE,
        optimizer=keras.optimizers.RMSprop(),
        metrics=[keras.metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    # Run one forward pass, then save the model to a temp .h5 file.
    model.predict(x)
    fd, keras_file = tempfile.mkstemp('.h5')
    try:
      keras.models.save_model(model, keras_file)
    finally:
      os.close(fd)

    # Convert to TFLite model.
    converter = lite.TocoConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensor metadata.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_float_detail(detail, expected_name, expected_shape):
      # Unquantized float32 tensor with the expected shape.
      self.assertEqual(expected_name, detail['name'])
      self.assertEqual(np.float32, detail['dtype'])
      self.assertTrue((expected_shape == detail['shape']).all())
      self.assertEqual((0., 0.), detail['quantization'])

    input_details = interp.get_input_details()
    self.assertEqual(1, len(input_details))
    check_float_detail(input_details[0], 'input', np.array([1, 3]))

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    check_float_detail(output_details[0], 'dense_1/BiasAdd', np.array([1, 3]))

    # Compare TFLite inference against the original Keras model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interp.set_tensor(input_details[0]['index'], input_data)
    interp.invoke()
    tflite_result = interp.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)
Exemple #22
0
  def testSequentialModelTocoConverter(self):
    """Test a Sequential tf.keras model with deprecated TocoConverter."""
    keras_file = self._getSequentialModel()

    # The deprecated alias must still produce a non-empty flatbuffer.
    tflite_model = lite.TocoConverter.from_keras_model_file(
        keras_file).convert()
    self.assertTrue(tflite_model)

    # The converted model must load and allocate cleanly.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()
Exemple #23
0
  def testSimpleModelTocoConverter(self):
    """Test a SavedModel with deprecated TocoConverter."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # The deprecated alias must still produce a non-empty flatbuffer.
    tflite_model = lite.TocoConverter.from_saved_model(
        saved_model_dir).convert()
    self.assertTrue(tflite_model)

    # The converted model must load and allocate cleanly.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()
Exemple #24
0
  def testFloatTocoConverter(self):
    """Tests deprecated test TocoConverter."""
    inp = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out = inp + inp
    sess = session.Session()

    # The deprecated alias must still produce a non-empty flatbuffer.
    tflite_model = lite.TocoConverter.from_session(sess, [inp],
                                                   [out]).convert()
    self.assertTrue(tflite_model)

    # The converted model must load and allocate cleanly.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()
Exemple #25
0
  def testGraphDefQuantization(self):
    """Quantized conversion via the low-level toco_convert_graph_def API."""
    inp_a = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
    inp_b = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
    # fake_quant supplies the (min, max) range the quantizer needs.
    _ = array_ops.fake_quant_with_min_max_args(
        inp_a + inp_b, min=0., max=1., name="output")
    sess = session.Session()

    # Convert the GraphDef directly with per-input quantization stats.
    input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
    output_arrays = ["output"]
    tflite_model = convert.toco_convert_graph_def(
        sess.graph_def,
        input_arrays_map,
        output_arrays,
        inference_type=lite_constants.QUANTIZED_UINT8,
        quantized_input_stats=[(0., 1.), (0., 1.)])
    self.assertTrue(tflite_model)

    # Reload the flatbuffer and verify the tensors were quantized to uint8.
    interp = Interpreter(model_content=tflite_model)
    interp.allocate_tensors()

    def check_quantized_input(detail, expected_name):
      # With mean 0 and std_dev 1 the quantization is (scale=1, zp=0).
      self.assertEqual(expected_name, detail["name"])
      self.assertEqual(np.uint8, detail["dtype"])
      self.assertTrue(([1, 16, 16, 3] == detail["shape"]).all())
      self.assertEqual((1., 0.),
                       detail["quantization"])  # scale, zero_point

    input_details = interp.get_input_details()
    self.assertEqual(2, len(input_details))
    check_quantized_input(input_details[0], "inputA")
    check_quantized_input(input_details[1], "inputB")

    output_details = interp.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual("output", output_details[0]["name"])
    self.assertEqual(np.uint8, output_details[0]["dtype"])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
    self.assertTrue(output_details[0]["quantization"][0] > 0)  # scale
Exemple #26
0
  def testFloatTocoConverter(self):
    """Deprecated TocoConverter round-trip through a frozen-graph file."""
    placeholder = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = placeholder + placeholder
    sess = session.Session()

    # Serialize the graph as a binary protobuf.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # The deprecated TocoConverter should still handle frozen graphs.
    toco = lite.TocoConverter.from_frozen_graph(graph_def_file,
                                                ['Placeholder'], ['add'])
    flatbuffer = toco.convert()
    self.assertTrue(flatbuffer)

    # The resulting flatbuffer must load into the interpreter.
    runtime = Interpreter(model_content=flatbuffer)
    runtime.allocate_tensors()
Exemple #27
0
  def testExtendedMode(self):
    """Extended-mode conversion produces a model the plain interpreter rejects."""
    placeholder = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    result = placeholder + placeholder
    sess = session.Session()

    # Convert with TOCO's extended mode enabled.
    toco = lite.TocoConverter.from_session(sess, [placeholder], [result])
    toco.converter_mode = lite.ConverterMode.TOCO_EXTENDED_ALL
    flatbuffer = toco.convert()
    self.assertTrue(flatbuffer)

    # Ensures the model contains TensorFlow ops.
    # TODO(nupurgarg): Check values once there is a Python delegate interface.
    runtime = Interpreter(model_content=flatbuffer)
    with self.assertRaises(RuntimeError) as error:
      runtime.allocate_tensors()
    self.assertIn(
        'Regular TensorFlow ops are not supported by this interpreter. Make '
        'sure you invoke the Eager delegate before inference.',
        str(error.exception))
    def testDefaultRangesStats(self):
        """Quantized conversion using default ranges for unannotated tensors."""
        placeholder = array_ops.placeholder(shape=[1, 16, 16, 3],
                                            dtype=dtypes.float32)
        result = placeholder + placeholder
        sess = session.Session()

        # Convert with quantized inference; default_ranges_stats supplies
        # min/max for tensors without recorded ranges.
        toco = lite.TocoConverter.from_session(sess, [placeholder], [result])
        toco.inference_type = lite_constants.QUANTIZED_UINT8
        toco.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
        toco.default_ranges_stats = (0, 6)  # min, max
        flatbuffer = toco.convert()
        self.assertTrue(flatbuffer)

        # Check values from converted model.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(1, len(inputs))
        self.assertEqual('Placeholder', inputs[0]['name'])
        self.assertEqual(np.uint8, inputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]['shape']).all())
        self.assertEqual((1., 0.), inputs[0]['quantization'])

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual('add', outputs[0]['name'])
        self.assertEqual(np.uint8, outputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]['shape']).all())
        self.assertTrue(outputs[0]['quantization'][0] > 0)  # scale
    def testOrderInputArrays(self):
        """Test a SavedModel ordering of input arrays."""
        saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

        # Inputs are requested in reverse order; the assertions below pin
        # the order the interpreter reports.
        toco = lite.TocoConverter.from_saved_model(
            saved_model_dir, input_arrays=['inputB', 'inputA'])
        flatbuffer = toco.convert()
        self.assertTrue(flatbuffer)

        # Check values from converted model.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(2, len(inputs))
        self.assertEqual('inputA', inputs[0]['name'])
        self.assertEqual(np.float32, inputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]['shape']).all())
        self.assertEqual((0., 0.), inputs[0]['quantization'])

        self.assertEqual('inputB', inputs[1]['name'])
        self.assertEqual(np.float32, inputs[1]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[1]['shape']).all())
        self.assertEqual((0., 0.), inputs[1]['quantization'])

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual('add', outputs[0]['name'])
        self.assertEqual(np.float32, outputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]['shape']).all())
        self.assertEqual((0., 0.), outputs[0]['quantization'])
  def testSequentialModel(self):
    """Test a Sequential tf.keras model with default inputs."""
    keras_file = self._getSequentialModel()

    toco = lite.TocoConverter.from_keras_model_file(keras_file)
    flatbuffer = toco.convert()
    self.assertTrue(flatbuffer)

    os.remove(keras_file)

    # Check values from converted model.
    runtime = Interpreter(model_content=flatbuffer)
    runtime.allocate_tensors()

    inputs = runtime.get_input_details()
    self.assertEqual(1, len(inputs))
    self.assertEqual('dense_input', inputs[0]['name'])
    self.assertEqual(np.float32, inputs[0]['dtype'])
    self.assertTrue(([1, 3] == inputs[0]['shape']).all())
    self.assertEqual((0., 0.), inputs[0]['quantization'])

    outputs = runtime.get_output_details()
    self.assertEqual(1, len(outputs))
    self.assertEqual('time_distributed/Reshape_1', outputs[0]['name'])
    self.assertEqual(np.float32, outputs[0]['dtype'])
    self.assertTrue(([1, 3, 3] == outputs[0]['shape']).all())
    self.assertEqual((0., 0.), outputs[0]['quantization'])
    def testNoneBatchSize(self):
        """Test a SavedModel, with None in input tensor's shape."""
        saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])

        toco = lite.TocoConverter.from_saved_model(saved_model_dir)
        flatbuffer = toco.convert()
        self.assertTrue(flatbuffer)

        # Check values from converted model: the None batch dimension is
        # resolved to 1 in the converted tensors below.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(2, len(inputs))
        self.assertEqual('inputA', inputs[0]['name'])
        self.assertEqual(np.float32, inputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]['shape']).all())
        self.assertEqual((0., 0.), inputs[0]['quantization'])

        self.assertEqual('inputB', inputs[1]['name'])
        self.assertEqual(np.float32, inputs[1]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[1]['shape']).all())
        self.assertEqual((0., 0.), inputs[1]['quantization'])

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual('add', outputs[0]['name'])
        self.assertEqual(np.float32, outputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]['shape']).all())
        self.assertEqual((0., 0.), outputs[0]['quantization'])
    def testSimpleModel(self):
        """Test a SavedModel."""
        saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

        # Convert model and ensure model is not None.
        toco = lite.TocoConverter.from_saved_model(saved_model_dir)
        flatbuffer = toco.convert()
        self.assertTrue(flatbuffer)

        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(2, len(inputs))
        self.assertEqual('inputA', inputs[0]['name'])
        self.assertEqual(np.float32, inputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]['shape']).all())
        self.assertEqual((0., 0.), inputs[0]['quantization'])

        self.assertEqual('inputB', inputs[1]['name'])
        self.assertEqual(np.float32, inputs[1]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[1]['shape']).all())
        self.assertEqual((0., 0.), inputs[1]['quantization'])

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual('add', outputs[0]['name'])
        self.assertEqual(np.float32, outputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]['shape']).all())
        self.assertEqual((0., 0.), outputs[0]['quantization'])
    def testPbtxt(self):
        """Convert a graph serialized as pbtxt and verify tensor details."""
        in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                          dtype=dtypes.float32)
        _ = in_tensor + in_tensor
        sess = session.Session()

        # Write graph to file.
        graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
        write_graph(sess.graph_def, '', graph_def_file, True)
        # Release the session once the graph is serialized; the other
        # frozen-graph tests in this file close their session here too.
        sess.close()

        # Convert model and ensure model is not None.
        converter = lite.TocoConverter.from_frozen_graph(
            graph_def_file, ['Placeholder'], ['add'])
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('Placeholder', input_details[0]['name'])
        self.assertEqual(np.float32, input_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
        self.assertEqual((0., 0.), input_details[0]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('add', output_details[0]['name'])
        self.assertEqual(np.float32, output_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
        self.assertEqual((0., 0.), output_details[0]['quantization'])
    def testGraphDefBasic(self):
        """Float conversion straight from a GraphDef."""
        placeholder = array_ops.placeholder(shape=[1, 16, 16, 3],
                                            dtype=dtypes.float32,
                                            name="input")
        _ = placeholder + placeholder
        sess = session.Session()

        flatbuffer = convert.toco_convert_graph_def(
            sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
            inference_type=lite_constants.FLOAT)
        self.assertTrue(flatbuffer)

        # Check values from converted model.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(1, len(inputs))
        self.assertEqual("input", inputs[0]["name"])
        self.assertEqual(np.float32, inputs[0]["dtype"])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]["shape"]).all())
        self.assertEqual((0., 0.), inputs[0]["quantization"])

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual("add", outputs[0]["name"])
        self.assertEqual(np.float32, outputs[0]["dtype"])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]["shape"]).all())
        self.assertEqual((0., 0.), outputs[0]["quantization"])
    def testFloat(self):
        """Basic float model conversion from a live session."""
        placeholder = array_ops.placeholder(shape=[1, 16, 16, 3],
                                            dtype=dtypes.float32)
        result = placeholder + placeholder
        sess = session.Session()

        # Convert model and ensure model is not None.
        toco = lite.TocoConverter.from_session(sess, [placeholder], [result])
        flatbuffer = toco.convert()
        self.assertTrue(flatbuffer)

        # Check values from converted model.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(1, len(inputs))
        self.assertEqual('Placeholder', inputs[0]['name'])
        self.assertEqual(np.float32, inputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]['shape']).all())
        self.assertEqual((0., 0.), inputs[0]['quantization'])

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual('add', outputs[0]['name'])
        self.assertEqual(np.float32, outputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]['shape']).all())
        self.assertEqual((0., 0.), outputs[0]['quantization'])
Exemple #36
0
  def testTFLiteGraphDef(self):
    # Tests the object detection model that cannot be loaded in TensorFlow.
    self._initObjectDetectionArgs()

    converter = lite.TFLiteConverter.from_frozen_graph(
        self._graph_def_file, self._input_arrays, self._output_arrays,
        self._input_shapes)
    # The postprocess op is custom, so conversion requires allow_custom_ops.
    converter.allow_custom_ops = True
    flatbuffer = converter.convert()
    self.assertTrue(flatbuffer)

    # Check values from converted model.
    runtime = Interpreter(model_content=flatbuffer)
    runtime.allocate_tensors()

    inputs = runtime.get_input_details()
    self.assertEqual(1, len(inputs))
    self.assertEqual('normalized_input_image_tensor', inputs[0]['name'])
    self.assertEqual(np.float32, inputs[0]['dtype'])
    self.assertTrue(([1, 300, 300, 3] == inputs[0]['shape']).all())
    self.assertEqual((0., 0.), inputs[0]['quantization'])

    outputs = runtime.get_output_details()
    self.assertEqual(4, len(outputs))
    self.assertEqual('TFLite_Detection_PostProcess', outputs[0]['name'])
    self.assertEqual(np.float32, outputs[0]['dtype'])
    self.assertTrue(([1, 10, 4] == outputs[0]['shape']).all())
    self.assertEqual((0., 0.), outputs[0]['quantization'])

    self.assertEqual('TFLite_Detection_PostProcess:1',
                     outputs[1]['name'])
    self.assertTrue(([1, 10] == outputs[1]['shape']).all())
    self.assertEqual('TFLite_Detection_PostProcess:2',
                     outputs[2]['name'])
    self.assertTrue(([1, 10] == outputs[2]['shape']).all())
    self.assertEqual('TFLite_Detection_PostProcess:3',
                     outputs[3]['name'])
    self.assertTrue(([1] == outputs[3]['shape']).all())
Exemple #37
0
  def testSequentialModel(self):
    """Test a Sequential tf.keras model with default inputs."""
    keras_file = self._getSequentialModel()

    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    flatbuffer = converter.convert()
    self.assertTrue(flatbuffer)

    # Check tensor details of converted model.
    runtime = Interpreter(model_content=flatbuffer)
    runtime.allocate_tensors()

    inputs = runtime.get_input_details()
    self.assertEqual(1, len(inputs))
    self.assertEqual('dense_input', inputs[0]['name'])
    self.assertEqual(np.float32, inputs[0]['dtype'])
    self.assertTrue(([1, 3] == inputs[0]['shape']).all())
    self.assertEqual((0., 0.), inputs[0]['quantization'])

    outputs = runtime.get_output_details()
    self.assertEqual(1, len(outputs))
    self.assertEqual('time_distributed/Reshape_1', outputs[0]['name'])
    self.assertEqual(np.float32, outputs[0]['dtype'])
    self.assertTrue(([1, 3, 3] == outputs[0]['shape']).all())
    self.assertEqual((0., 0.), outputs[0]['quantization'])

    # Check inference of converted model against the original Keras model.
    sample = np.array([[1, 2, 3]], dtype=np.float32)
    runtime.set_tensor(inputs[0]['index'], sample)
    runtime.invoke()
    lite_out = runtime.get_tensor(outputs[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_out = keras_model.predict(sample)

    np.testing.assert_almost_equal(lite_out, keras_out, 5)
    os.remove(keras_file)
Exemple #38
0
class TFObjectDetection(ObjectDetection):
    """Object Detection class for TensorFlow
    """

    def __init__(self, model_path, labels):
        super(TFObjectDetection, self).__init__(labels)
        # Load the model once and cache tensor metadata for predict().
        self.interpreter = Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def predict(self, preprocessed_image):
        # RGB -> BGR
        bgr = np.array(preprocessed_image, dtype=np.float32)[:, :, (2, 1, 0)]

        # Add the batch dimension, run the interpreter, and drop it again.
        self.interpreter.set_tensor(self.input_details[0]['index'],
                                    bgr[np.newaxis, ...])
        self.interpreter.invoke()
        batch_output = self.interpreter.get_tensor(
            self.output_details[0]['index'])

        return batch_output[0]
    def testQuantization(self):
        """Quantized uint8 conversion from a session with two inputs."""
        lhs = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32,
                                    name='inputA')
        rhs = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32,
                                    name='inputB')
        result = array_ops.fake_quant_with_min_max_args(
            lhs + rhs, min=0., max=1., name='output')
        sess = session.Session()

        # Convert model and ensure model is not None.
        toco = lite.TocoConverter.from_session(sess, [lhs, rhs], [result])
        toco.inference_type = lite_constants.QUANTIZED_UINT8
        toco.quantized_input_stats = {
            'inputA': (0., 1.),
            'inputB': (0., 1.)
        }  # mean, std_dev
        flatbuffer = toco.convert()
        self.assertTrue(flatbuffer)

        # Check values from converted model.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(2, len(inputs))
        self.assertEqual('inputA', inputs[0]['name'])
        self.assertEqual(np.uint8, inputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]['shape']).all())
        self.assertEqual((1., 0.),
                         inputs[0]['quantization'])  # scale, zero_point

        self.assertEqual('inputB', inputs[1]['name'])
        self.assertEqual(np.uint8, inputs[1]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == inputs[1]['shape']).all())
        self.assertEqual((1., 0.),
                         inputs[1]['quantization'])  # scale, zero_point

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual('output', outputs[0]['name'])
        self.assertEqual(np.uint8, outputs[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]['shape']).all())
        self.assertTrue(outputs[0]['quantization'][0] > 0)  # scale
    def testGraphDefQuantization(self):
        """Quantized uint8 conversion straight from a GraphDef."""
        lhs = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32,
                                    name="inputA")
        rhs = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32,
                                    name="inputB")
        _ = array_ops.fake_quant_with_min_max_args(
            lhs + rhs, min=0., max=1., name="output")
        sess = session.Session()

        # Convert the GraphDef directly with quantized inference enabled.
        flatbuffer = convert.toco_convert_graph_def(
            sess.graph_def,
            [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])],
            ["output"],
            inference_type=lite_constants.QUANTIZED_UINT8,
            quantized_input_stats=[(0., 1.), (0., 1.)])
        self.assertTrue(flatbuffer)

        # Check values from converted model.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(2, len(inputs))
        self.assertEqual("inputA", inputs[0]["name"])
        self.assertEqual(np.uint8, inputs[0]["dtype"])
        self.assertTrue(([1, 16, 16, 3] == inputs[0]["shape"]).all())
        self.assertEqual((1., 0.),
                         inputs[0]["quantization"])  # scale, zero_point

        self.assertEqual("inputB", inputs[1]["name"])
        self.assertEqual(np.uint8, inputs[1]["dtype"])
        self.assertTrue(([1, 16, 16, 3] == inputs[1]["shape"]).all())
        self.assertEqual((1., 0.),
                         inputs[1]["quantization"])  # scale, zero_point

        outputs = runtime.get_output_details()
        self.assertEqual(1, len(outputs))
        self.assertEqual("output", outputs[0]["name"])
        self.assertEqual(np.uint8, outputs[0]["dtype"])
        self.assertTrue(([1, 16, 16, 3] == outputs[0]["shape"]).all())
        self.assertTrue(outputs[0]["quantization"][0] > 0)  # scale
  def testFunctionalSequentialModel(self):
    """Test a Functional tf.keras model containing a Sequential model."""
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.RepeatVector(3))
    model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
    model = keras.models.Model(model.input, model.output)

    model.compile(
        loss=keras.losses.MSE,
        optimizer=keras.optimizers.RMSprop(),
        metrics=[keras.metrics.categorical_accuracy],
        sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    # Run a forward pass once so the model is fully built before saving.
    # (The original code called predict twice back-to-back; once suffices.)
    model.predict(x)

    fd, keras_file = tempfile.mkstemp('.h5')
    keras.models.save_model(model, keras_file)

    # Convert to TFLite model.
    converter = lite.TocoConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    os.close(fd)
    os.remove(keras_file)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
  def testFunctionalModel(self):
    """Test a Functional tf.keras model with default inputs."""
    inputs = keras.layers.Input(shape=(3,), name='input')
    hidden = keras.layers.Dense(2)(inputs)
    output = keras.layers.Dense(3)(hidden)

    model = keras.models.Model(inputs, output)
    model.compile(
        loss=keras.losses.MSE,
        optimizer=keras.optimizers.RMSprop(),
        metrics=[keras.metrics.categorical_accuracy])
    sample_x = np.random.random((1, 3))
    sample_y = np.random.random((1, 3))
    model.train_on_batch(sample_x, sample_y)

    model.predict(sample_x)
    fd, keras_file = tempfile.mkstemp('.h5')
    keras.models.save_model(model, keras_file)

    # Convert to TFLite model.
    converter = lite.TocoConverter.from_keras_model_file(keras_file)
    flatbuffer = converter.convert()
    self.assertTrue(flatbuffer)

    os.close(fd)
    os.remove(keras_file)

    # Check values from converted model.
    runtime = Interpreter(model_content=flatbuffer)
    runtime.allocate_tensors()

    in_details = runtime.get_input_details()
    self.assertEqual(1, len(in_details))
    self.assertEqual('input', in_details[0]['name'])
    self.assertEqual(np.float32, in_details[0]['dtype'])
    self.assertTrue(([1, 3] == in_details[0]['shape']).all())
    self.assertEqual((0., 0.), in_details[0]['quantization'])

    out_details = runtime.get_output_details()
    self.assertEqual(1, len(out_details))
    self.assertEqual('dense_1/BiasAdd', out_details[0]['name'])
    self.assertEqual(np.float32, out_details[0]['dtype'])
    self.assertTrue(([1, 3] == out_details[0]['shape']).all())
    self.assertEqual((0., 0.), out_details[0]['quantization'])
Exemple #43
0
    def testTFLiteGraphDef(self):
        # Tests the object detection model that cannot be loaded in TensorFlow.
        self._initObjectDetectionArgs()

        converter = lite.TocoConverter.from_frozen_graph(
            self._graph_def_file, self._input_arrays, self._output_arrays,
            self._input_shapes)
        # The postprocess op is custom, so allow_custom_ops is required.
        converter.allow_custom_ops = True
        flatbuffer = converter.convert()
        self.assertTrue(flatbuffer)

        # Check values from converted model.
        runtime = Interpreter(model_content=flatbuffer)
        runtime.allocate_tensors()

        inputs = runtime.get_input_details()
        self.assertEqual(1, len(inputs))
        self.assertEqual('normalized_input_image_tensor',
                         inputs[0]['name'])
        self.assertEqual(np.float32, inputs[0]['dtype'])
        self.assertTrue(([1, 300, 300, 3] == inputs[0]['shape']).all())
        self.assertEqual((0., 0.), inputs[0]['quantization'])

        outputs = runtime.get_output_details()
        self.assertEqual(4, len(outputs))
        self.assertEqual('TFLite_Detection_PostProcess',
                         outputs[0]['name'])
        self.assertEqual(np.float32, outputs[0]['dtype'])
        self.assertTrue(([1, 10, 4] == outputs[0]['shape']).all())
        self.assertEqual((0., 0.), outputs[0]['quantization'])

        self.assertEqual('TFLite_Detection_PostProcess:1',
                         outputs[1]['name'])
        self.assertTrue(([1, 10] == outputs[1]['shape']).all())
        self.assertEqual('TFLite_Detection_PostProcess:2',
                         outputs[2]['name'])
        self.assertTrue(([1, 10] == outputs[2]['shape']).all())
        self.assertEqual('TFLite_Detection_PostProcess:3',
                         outputs[3]['name'])
        self.assertTrue(([1] == outputs[3]['shape']).all())
Exemple #44
0
def main():
    """Classify the ROI of every sampled video frame and display the result.

    Loads pickled class labels and a TFLite model, runs the classifier on
    the region of interest of every nth frame, smooths predictions over the
    last 15 frames, and shows the annotated ROI until 'q' is pressed.
    """
    args = parse_args()

    # Region of interest as (left, top, right, bottom) pixel coordinates.
    roi_coords = (args['roi_x'], args['roi_y'],
                  args['roi_x'] + args['roi_width'],
                  args['roi_y'] + args['roi_height'])

    # Determine class labels.
    # SECURITY NOTE: pickle.load can execute arbitrary code; only load label
    # files from trusted sources.
    with open(args['labels'], 'rb') as f:
        labels = pickle.load(f)

    # Load the model.
    interpreter = Interpreter(args['model'])
    interpreter.allocate_tensors()

    input_tensor_index = interpreter.get_input_details()[0]['index']
    output_tensor_index = interpreter.get_output_details()[0]['index']

    # Rolling window of recent predictions used to smooth the displayed class.
    ys = collections.deque(maxlen=15)

    try:
        for _, curr_roi, _ in iter_frames(args['video'],
                                          roi_coords,
                                          nth=args['nth']):
            # Run the classifier and add the result to the set of recent
            # predictions. The grayscale ROI is expanded to 3 channels plus a
            # batch dimension (assumes the model expects (1, H, W, 3) input).
            X = cv2.cvtColor(curr_roi, cv2.COLOR_GRAY2BGR)
            X = np.reshape(X, (1, *X.shape))
            X = keras.applications.mobilenet_v2.preprocess_input(X)
            # The TFLite interpreter requires the tensor dtype to match
            # exactly; preprocess_input may return a wider float type, so
            # cast defensively to float32.
            X = np.asarray(X, dtype=np.float32)

            interpreter.set_tensor(input_tensor_index, X)
            interpreter.invoke()

            y = interpreter.get_tensor(output_tensor_index)[0]
            ys.append(y)

            # Determine the instantaneous and average prediction results.
            class_index = y.argmax()
            class_name = labels[class_index]

            # Compute the smoothed prediction once and reuse it below.
            avg_y = np.mean(ys, axis=0)
            avg_class_name = labels[avg_y.argmax()]

            # Draw the results on the image.
            curr_roi = cv2.cvtColor(curr_roi, cv2.COLOR_GRAY2BGR)
            curr_roi = cv2.putText(curr_roi,
                                   f'{class_name}: {100*y.max():0.2f}%',
                                   (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                   (0, 0, 255))
            curr_roi = cv2.putText(
                curr_roi,
                f'{avg_class_name}: {100*avg_y.max():0.2f}%',
                (25, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255))

            cv2.imshow('Prediction', curr_roi)
            # waitKey(0) blocks until a key press; 'q' quits.
            if cv2.waitKey(0) == ord('q'):
                break
    finally:
        # Tear down the preview window even on error or early exit.
        cv2.destroyAllWindows()
Exemple #45
0
    def test_fully_connected(self):
        """End-to-end check of ops.fully_connected against a TF reference.

        Verifies three things: (1) the helper adds exactly one op to the TF
        graph, (2) that op carries the expected TFLite function attributes,
        and (3) the converted FlatBuffer produces the same output as running
        the graph in a TF session (within ERROR_THRESHOLD).

        NOTE: the assertions on _get_tf_operations() counts depend on the
        exact order of graph-building statements below; do not reorder.
        """
        num_batches = 10
        num_input_channels = 20
        num_output_channels = 5

        # No operation initially.
        self.assertEqual(len(_get_tf_operations()), 0)

        # Create 1 operation (Placeholder for the input).
        input_shape = (num_batches, num_input_channels)
        x = tf.placeholder(tf.float32, shape=input_shape)
        self.assertEqual(len(_get_tf_operations()), 1)

        # Defining weights and bias as constants. It should add 2 more
        # nodes into the graph.
        weights_shape = (num_output_channels, num_input_channels)
        weights_value = np.random.rand(*weights_shape).astype(np.float32)
        weights = tf.constant(weights_value, dtype=tf.float32)
        bias_shape = (num_output_channels, )
        bias_value = np.random.rand(*bias_shape).astype(np.float32)
        bias = tf.constant(bias_value, dtype=tf.float32)
        self.assertEqual(len(_get_tf_operations()), 3)

        # Call the function to construct a TF Function node which is equivalent
        # to TFLite FULLY_CONNECTED node.
        output = ops.fully_connected(x,
                                     weights,
                                     bias,
                                     fused_activation_function='RELU')

        # Exactly one op should be added. It should be a function containing 2-3 ops
        # (matmul, add, relu).
        operations = _get_tf_operations()
        self.assertEqual(len(operations), 4)

        # The newly added op is the last one; inspect its TFLite attributes.
        op = operations[-1]
        node_def = op.node_def
        # Note: `as_bytes` conversion is required for Python 3.
        self.assertEqual(node_def.attr['_tflite_function_name'].s,
                         as_bytes('FULLY_CONNECTED'))
        self.assertEqual(node_def.attr['_fused_activation_function'].s,
                         as_bytes('RELU'))

        # Try to run the TF session to get the output value.
        input_value = np.random.rand(*input_shape).astype(np.float32)
        with tf.Session() as sess:
            output_value = sess.run(output, feed_dict={x: input_value})
            graph_def = sess.graph_def

        # Convert the GraphDef to FlatBuffer.
        flatbuffer_data = converter.Convert(graph_def.SerializeToString())

        # Construct an interpreter with the FlatBuffer.
        interpreter = Interpreter(model_content=flatbuffer_data)

        # Invoke the interpreter.
        # Resize before allocating: the converted model's input may not carry
        # the static batch size -- pin it to the shape we feed below.
        input_details = interpreter.get_input_details()
        input_index = input_details[0]['index']
        interpreter.resize_tensor_input(input_index, input_shape)
        interpreter.allocate_tensors()
        interpreter.set_tensor(input_index, input_value)
        interpreter.invoke()

        # Get the output from the interpreter, and compare it with the result from
        # TensorFlow.
        output_details = interpreter.get_output_details()
        tflite_output_value = interpreter.get_tensor(
            output_details[0]['index'])

        # Elementwise comparison; results should agree up to float tolerance.
        max_error = np.max(np.abs(tflite_output_value - output_value))

        self.assertTrue(max_error < ERROR_THRESHOLD)
Exemple #46
0
    def testFunctionalModel(self):
        """Test a Functional tf.keras model with default inputs."""
        with session.Session().as_default():
            # Build and briefly train a tiny two-layer functional model.
            input_layer = keras.layers.Input(shape=(3, ), name='input')
            hidden = keras.layers.Dense(2)(input_layer)
            predictions = keras.layers.Dense(3)(hidden)

            model = keras.models.Model(input_layer, predictions)
            model.compile(loss=keras.losses.MSE,
                          optimizer=keras.optimizers.RMSprop(),
                          metrics=[keras.metrics.categorical_accuracy])
            batch_x = np.random.random((1, 3))
            batch_y = np.random.random((1, 3))
            model.train_on_batch(batch_x, batch_y)
            model.predict(batch_x)

            # Persist the trained model to a temporary HDF5 file.
            tmp_fd, keras_file = tempfile.mkstemp('.h5')
            try:
                keras.models.save_model(model, keras_file)
            finally:
                os.close(tmp_fd)

        # Convert to TFLite model.
        converter = lite.TocoConverter.from_keras_model_file(keras_file)
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Check tensor details of converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        in_detail = input_details[0]
        self.assertEqual('input', in_detail['name'])
        self.assertEqual(np.float32, in_detail['dtype'])
        self.assertTrue(([1, 3] == in_detail['shape']).all())
        self.assertEqual((0., 0.), in_detail['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        out_detail = output_details[0]
        self.assertEqual('dense_1/BiasAdd', out_detail['name'])
        self.assertEqual(np.float32, out_detail['dtype'])
        self.assertTrue(([1, 3] == out_detail['shape']).all())
        self.assertEqual((0., 0.), out_detail['quantization'])

        # Run the converted model and compare against the Keras original.
        input_data = np.array([[1, 2, 3]], dtype=np.float32)
        interpreter.set_tensor(in_detail['index'], input_data)
        interpreter.invoke()
        tflite_result = interpreter.get_tensor(out_detail['index'])

        keras_model = keras.models.load_model(keras_file)
        keras_result = keras_model.predict(input_data)

        np.testing.assert_almost_equal(tflite_result, keras_result, 5)
        os.remove(keras_file)
Exemple #47
0
 def __init__(self, model_path, labels):
     """Load a TFLite object-detection model and cache its tensor details.

     Args:
         model_path: Path to the .tflite model file on disk.
         labels: Class labels, forwarded unchanged to the base class.
     """
     super(TFObjectDetection, self).__init__(labels)
     self.interpreter = Interpreter(model_path=model_path)
     self.interpreter.allocate_tensors()
     # Cache input/output tensor metadata once so later inference calls
     # do not have to query the interpreter repeatedly.
     self.input_details = self.interpreter.get_input_details()
     self.output_details = self.interpreter.get_output_details()
Exemple #48
0
    def testSequentialModel(self):
        """Test a Sequential tf.keras model with default inputs."""
        keras_file = self._getSequentialModel()

        # Convert the saved Keras model to a TFLite flatbuffer.
        tflite_model = lite.TocoConverter.from_keras_model_file(
            keras_file).convert()
        self.assertTrue(tflite_model)

        # Inspect the converted model's tensor metadata.
        interp = Interpreter(model_content=tflite_model)
        interp.allocate_tensors()

        in_details = interp.get_input_details()
        self.assertEqual(1, len(in_details))
        in_tensor = in_details[0]
        self.assertEqual('dense_input', in_tensor['name'])
        self.assertEqual(np.float32, in_tensor['dtype'])
        self.assertTrue(([1, 3] == in_tensor['shape']).all())
        self.assertEqual((0., 0.), in_tensor['quantization'])

        out_details = interp.get_output_details()
        self.assertEqual(1, len(out_details))
        out_tensor = out_details[0]
        self.assertEqual('time_distributed/Reshape_1', out_tensor['name'])
        self.assertEqual(np.float32, out_tensor['dtype'])
        self.assertTrue(([1, 3, 3] == out_tensor['shape']).all())
        self.assertEqual((0., 0.), out_tensor['quantization'])

        # Run inference and compare against the original Keras model.
        input_data = np.array([[1, 2, 3]], dtype=np.float32)
        interp.set_tensor(in_tensor['index'], input_data)
        interp.invoke()
        tflite_result = interp.get_tensor(out_tensor['index'])

        keras_model = keras.models.load_model(keras_file)
        keras_result = keras_model.predict(input_data)

        np.testing.assert_almost_equal(tflite_result, keras_result, 5)
        os.remove(keras_file)
Exemple #49
0
    def testFunctionalModelMultipleInputs(self):
        """Test a Functional tf.keras model with multiple inputs and outputs.

        Builds a two-input/two-output model sharing a single Dense layer,
        converts it to TFLite, and verifies the converted model's tensor
        metadata.
        """
        a = keras.layers.Input(shape=(3, ), name='input_a')
        b = keras.layers.Input(shape=(3, ), name='input_b')
        # One Dense layer shared by both inputs.
        dense = keras.layers.Dense(4, name='dense')
        c = dense(a)
        d = dense(b)
        e = keras.layers.Dropout(0.5, name='dropout')(c)

        model = keras.models.Model([a, b], [d, e])
        model.compile(loss=keras.losses.MSE,
                      optimizer=keras.optimizers.RMSprop(),
                      metrics=[keras.metrics.mae],
                      loss_weights=[1., 0.5])

        input_a_np = np.random.random((10, 3))
        input_b_np = np.random.random((10, 3))
        output_d_np = np.random.random((10, 4))
        output_e_np = np.random.random((10, 4))
        model.train_on_batch([input_a_np, input_b_np],
                             [output_d_np, output_e_np])

        model.predict([input_a_np, input_b_np], batch_size=5)
        fd, keras_file = tempfile.mkstemp('.h5')
        # mkstemp returns an already-open OS descriptor; close it even if
        # save_model raises, otherwise the descriptor leaks.
        try:
            keras.models.save_model(model, keras_file)
        finally:
            os.close(fd)

        # Convert to TFLite model.
        converter = lite.TocoConverter.from_keras_model_file(keras_file)
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        os.remove(keras_file)

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(2, len(input_details))
        self.assertEqual('input_a', input_details[0]['name'])
        self.assertEqual(np.float32, input_details[0]['dtype'])
        self.assertTrue(([1, 3] == input_details[0]['shape']).all())
        self.assertEqual((0., 0.), input_details[0]['quantization'])

        self.assertEqual('input_b', input_details[1]['name'])
        self.assertEqual(np.float32, input_details[1]['dtype'])
        self.assertTrue(([1, 3] == input_details[1]['shape']).all())
        self.assertEqual((0., 0.), input_details[1]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(2, len(output_details))
        self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
        self.assertEqual(np.float32, output_details[0]['dtype'])
        self.assertTrue(([1, 4] == output_details[0]['shape']).all())
        self.assertEqual((0., 0.), output_details[0]['quantization'])

        self.assertEqual('dropout/Identity', output_details[1]['name'])
        self.assertEqual(np.float32, output_details[1]['dtype'])
        self.assertTrue(([1, 4] == output_details[1]['shape']).all())
        self.assertEqual((0., 0.), output_details[1]['quantization'])
Exemple #50
0
  def testFunctionalModelMultipleInputs(self):
    """Test a Functional tf.keras model with multiple inputs and outputs.

    Builds a two-input/two-output model that shares a single Dense layer,
    converts it to TFLite, and verifies the converted model's input and
    output tensor metadata.
    """
    with session.Session().as_default():
      a = keras.layers.Input(shape=(3,), name='input_a')
      b = keras.layers.Input(shape=(3,), name='input_b')
      # One Dense layer applied to both inputs (shared weights).
      dense = keras.layers.Dense(4, name='dense')
      c = dense(a)
      d = dense(b)
      e = keras.layers.Dropout(0.5, name='dropout')(c)

      model = keras.models.Model([a, b], [d, e])
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.mae],
          loss_weights=[1., 0.5])

      input_a_np = np.random.random((10, 3))
      input_b_np = np.random.random((10, 3))
      output_d_np = np.random.random((10, 4))
      output_e_np = np.random.random((10, 4))
      model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])

      model.predict([input_a_np, input_b_np], batch_size=5)
      fd, keras_file = tempfile.mkstemp('.h5')
      # mkstemp returns an open OS descriptor; close it even if save fails.
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    os.remove(keras_file)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('input_a', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('input_b', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(2, len(output_details))
    self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 4] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # The dropout output reduces to an Identity op at inference time.
    self.assertEqual('dropout/Identity', output_details[1]['name'])
    self.assertEqual(np.float32, output_details[1]['dtype'])
    self.assertTrue(([1, 4] == output_details[1]['shape']).all())
    self.assertEqual((0., 0.), output_details[1]['quantization'])