def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, signature_key,
                                            inputs):
    """Evaluates the model on the `inputs`.

    Args:
      tflite_model: TensorFlow Lite model.
      signature_key: Signature key.
      inputs: Map from input tensor names in the SignatureDef to tensor values.

    Returns:
      Dictionary of outputs. Each key is an output name in the SignatureDef
      identified by `signature_key`; each value is the corresponding output
      value.
    """
    interpreter = Interpreter(model_content=tflite_model)
    signature_runner = interpreter.get_signature_runner(signature_key)
    return signature_runner(**inputs)
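
For context, a minimal sketch of how such a signature runner is driven directly, assuming `tflite_model` holds flatbuffer bytes from a prior `converter.convert()` call and that the model exposes a 'serving_default' signature with a single input named `x` (both names are illustrative):

import numpy as np
import tensorflow as tf

# Build the interpreter from the flatbuffer and look up the signature by key.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
signature_runner = interpreter.get_signature_runner('serving_default')

# Inputs are passed as keyword arguments keyed by SignatureDef input names;
# the result is a dict keyed by SignatureDef output names.
outputs = signature_runner(x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))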
Example 2
  def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, method_name,
                                            inputs):
    """Evaluates the model on the `inputs`.

    Args:
      tflite_model: TensorFlow Lite model.
      method_name: Exported Method name of the SavedModel.
      inputs: Map from input tensor names in the SignatureDef to tensor values.

    Returns:
      Dictionary of outputs. Each key is an output name in the SignatureDef
      identified by `method_name`; each value is the corresponding output
      value.
    """
    interpreter = Interpreter(model_content=tflite_model)
    signature_runner = interpreter.get_signature_runner(method_name)
    return signature_runner(**inputs)
Example 3
  def testFlexResourceVariables(self):

    class Model(tf.Module):

      def __init__(self):
        self.v = tf.Variable([[0.0, 0.0, 0.0, 0.0]])

      @tf.function(
          input_signature=[tf.TensorSpec(shape=[1, 4], dtype=tf.float32)])
      def eval(self, x):
        # Control flow is needed to generate "FlexReadVariableOp".
        if tf.reduce_mean(x) > 1.0:
          self.v.assign_add([[1.0, 1.0, 1.0, 1.0]])
        return self.v + x

    m = Model()
    to_save = m.eval.get_concrete_function()
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    tf.saved_model.save(m, save_dir, to_save)
    converter = tf.lite.TFLiteConverter.from_saved_model(save_dir)

    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,
        tf.lite.OpsSet.SELECT_TF_OPS,
    ]
    converter.experimental_enable_resource_variables = True
    tflite_model = converter.convert()

    # Check the model works with TensorFlow ops.
    interpreter = Interpreter(model_content=tflite_model)
    signature_runner = interpreter.get_signature_runner()
    outputs = signature_runner(
        x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
    expected_output = np.array([[2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
    self.assertTrue((expected_output == list(outputs.values())[0]).all())

    # Second run.
    outputs = signature_runner(
        x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
    expected_output = np.array([[3.0, 4.0, 5.0, 6.0]], dtype=np.float32)
    self.assertTrue((expected_output == list(outputs.values())[0]).all())
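
Since resource variables are materialized per interpreter instance, the `assign_add` in `eval` persists across calls on the same runner, which is why the second run returns values one higher than the first. A short sketch, under the same assumptions as the test above, of resetting that state by rebuilding the interpreter:

# A fresh interpreter re-initializes the variable, so the first call on it
# should reproduce the original first-run output.
fresh_interpreter = tf.lite.Interpreter(model_content=tflite_model)
fresh_runner = fresh_interpreter.get_signature_runner()
outputs = fresh_runner(x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
# Expected again: [[2.0, 3.0, 4.0, 5.0]]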
Example 4
class Calibrator(object):
  """Calibrates a floating point model and then quantizes it.

  This is an internal class, not a public interface.
  """

  def __init__(self,
               model_content,
               custom_op_registerers_by_name=None,
               custom_op_registerers_by_func=None):
    """Constructor.

    Args:
      model_content: Content of a TF-Lite Flatbuffer file.
      custom_op_registerers_by_name: List of str (symbol names) that take a
        pointer to a MutableOpResolver and register custom ops.
      custom_op_registerers_by_func: List of functions that take a pointer to a
        MutableOpResolver and register custom ops.

    Raises:
      ValueError: If the calibrator was unable to open the model.
    """
    if not model_content:
      raise ValueError("`model_content` must be specified.")
    if custom_op_registerers_by_name is None:
      custom_op_registerers_by_name = []
    if custom_op_registerers_by_func is None:
      custom_op_registerers_by_func = []
    try:
      self._calibrator = (
          _calibration_wrapper.CalibrationWrapper(
              model_content, custom_op_registerers_by_name,
              custom_op_registerers_by_func))
      self._model_content = model_content
    except Exception as e:
      raise ValueError("Failed to parse the model: %s." % e)
    if not self._calibrator:
      raise ValueError("Failed to parse the model.")

  def _feed_tensors(self, dataset_gen, resize_input):
    """Feed tensors to the calibrator."""
    initialized = False
    for sample in dataset_gen():
      if isinstance(sample, dict):
        # Convert signature based inputs to the tensor index based data.
        if not hasattr(self, "_interpreter"):
          self._interpreter = Interpreter(model_content=self._model_content)
        input_array = []
        signature_runner = self._interpreter.get_signature_runner()
        input_details = sorted(
            signature_runner.get_input_details().items(),
            key=lambda item: item[1]["index"])
        for input_name, _ in input_details:
          input_array.append(sample[input_name])
      elif isinstance(sample, list):
        input_array = sample
      else:
        raise ValueError("You need to provide either a dictionary with input "
                         "names or values and an array with input values in "
                         "the order of input tensors of the graph in the "
                         "representative_dataset function. Unsupported value "
                         "from dataset: {}.".format(sample))

      if not initialized:
        initialized = True
        if resize_input:
          self._calibrator.Prepare([list(s.shape) for s in input_array])
        else:
          self._calibrator.Prepare()
      self._calibrator.FeedTensor(input_array)

  @convert_phase(Component.OPTIMIZE_TFLITE_MODEL,
                 SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER)
  def calibrate_and_quantize(self,
                             dataset_gen,
                             input_type,
                             output_type,
                             allow_float,
                             activations_type=dtypes.int8,
                             resize_input=True,
                             disable_per_channel=False):
    """Calibrates the model with specified generator and then quantizes it.

    The input shapes of the calibrator are resized with the calibration data if
    `resize_input` is set.

    Returns:
      A quantized model.

    Args:
      dataset_gen: A generator that generates calibration samples.
      input_type: A tf.dtype representing the desired real-value input type.
      output_type: A tf.dtype representing the desired real-value output type.
      allow_float: A boolean. False if the resulting model cannot perform float
                   computation, useful when targeting an integer-only backend.
                   If False, an error will be thrown if an operation cannot be
                   quantized, otherwise the model will fallback to float ops.
      activations_type: A tf.dtype representing the desired type for
                   activations.
      resize_input: A boolean. True if the shape of the sample data is different
        from the input.
      disable_per_channel: A boolean. True if disabling per-channel
                   quantization.
    """
    self._feed_tensors(dataset_gen, resize_input)
    return self._calibrator.QuantizeModel(
        np.dtype(input_type.as_numpy_dtype()).num,
        np.dtype(output_type.as_numpy_dtype()).num, allow_float,
        np.dtype(activations_type.as_numpy_dtype()).num,
        disable_per_channel)

  @convert_phase(Component.OPTIMIZE_TFLITE_MODEL,
                 SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER)
  def calibrate_and_quantize_single(self,
                                    dataset_gen,
                                    input_type,
                                    output_type,
                                    allow_float,
                                    op_output_name,
                                    resize_input=True):
    """Calibrates the model with specified generator and then quantizes it.

    Only the single op with output op_output_name will be quantized.
    The input shapes of the calibrator are resized with the calibration data.

    Returns:
      A quantized model.

    Args:
      dataset_gen: A generator that generates calibration samples.
      input_type: A tf.dtype representing the desired real-value input type.
      output_type: A tf.dtype representing the desired real-value output type.
      allow_float: A boolean. False if the resulting model cannot perform float
        computation, useful when targeting an integer-only backend. If False, an
        error will be thrown if an operation cannot be quantized, otherwise the
        model will fallback to float ops.
      op_output_name: A string, only this op will be quantized.
      resize_input: A boolean. True if the shape of the sample data is different
        from the input.
    """
    self._feed_tensors(dataset_gen, resize_input)
    return self._calibrator.QuantizeModel(
        np.dtype(input_type.as_numpy_dtype()).num,
        np.dtype(output_type.as_numpy_dtype()).num, allow_float, op_output_name)

  @convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.CALIBRATE)
  def calibrate(self, dataset_gen):
    """Calibrates the model with specified generator.

    Returns:
      A model with min and max calibration stats.

    Args:
      dataset_gen: A generator that generates calibration samples.
    """
    self._feed_tensors(dataset_gen, resize_input=True)
    return self._calibrator.Calibrate()
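
To tie the pieces together, a sketch of the end-to-end flow, assuming `model_content` holds the bytes of a float TFLite flatbuffer (e.g. from `converter.convert()`) for a model with a single [1, 4] float32 input. The generator below uses the list form accepted by `_feed_tensors`; a dict keyed by SignatureDef input names would work the same way:

import numpy as np
from tensorflow.python.framework import dtypes

def representative_dataset():
  # Hypothetical calibration data: 100 random samples shaped like the input.
  for _ in range(100):
    yield [np.random.uniform(size=(1, 4)).astype(np.float32)]

calibrator = Calibrator(model_content)

# Full calibrate-then-quantize pass; float fallback left enabled.
quantized_model = calibrator.calibrate_and_quantize(
    representative_dataset,
    input_type=dtypes.float32,
    output_type=dtypes.float32,
    allow_float=True)

# Alternatively, a fresh Calibrator can run calibration only, producing a
# model annotated with min/max stats for a separate quantization step.
calibrated_model = Calibrator(model_content).calibrate(representative_dataset)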