Example #1
    def _initialize_stats(self):
        """Helper function initializes stats."""
        # TODO(b/177749613) : Fix the dependency on tf.lite._get_ops_details()
        # The following code maps each output tensor index to the index of its
        # defining op, since the NumericVerify op only provides its quantized
        # input tensor index.
        self._defining_op = dict()
        for op_info in self._quant_interpreter._get_ops_details():  # pylint: disable=protected-access
            self._defining_op.update({
                tensor_idx: op_info['index']
                for tensor_idx in op_info['outputs']
            })

        self._numeric_verify_tensor_details = None
        self._numeric_verify_op_details = None
        if not self._get_numeric_verify_tensor_details():
            raise ValueError(
                'Please check if the quantized model is in debug mode')

        self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy()
        if self._debug_options.layer_debug_metrics:
            self._layer_debug_metrics.update(
                self._debug_options.layer_debug_metrics)

        self.layer_statistics = None
        self.model_statistics = None

        self._metrics = metrics_stub.TFLiteMetrics()
        self._metrics.increase_counter_debugger_creation()
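
For context, here is a minimal standalone sketch of what the mapping built above looks like. The dict keys ('index', 'op_name', 'outputs') mirror what the snippet reads from the private _get_ops_details() method; treating them as stable is an assumption, since that API is internal and subject to change.

# Hypothetical op details shaped like tf.lite's private _get_ops_details()
# output; the keys are inferred from the snippet above.
ops_details = [
    {'index': 0, 'op_name': 'CONV_2D', 'outputs': [1]},
    {'index': 1, 'op_name': 'NUMERIC_VERIFY', 'outputs': [2]},
]

defining_op = {}
for op_info in ops_details:
    # Map each output tensor index back to the op that produced it.
    defining_op.update({
        tensor_idx: op_info['index'] for tensor_idx in op_info['outputs']
    })

print(defining_op)  # {1: 0, 2: 1}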
Example #2
    def test_interpreter_creation_counter_increase_success(self):
        stub = metrics.TFLiteMetrics()
        stub.increase_counter_interpreter_creation()
Example #3
    def test_TFLiteMetrics_creation_success(self):
        metrics.TFLiteMetrics()
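
Examples #2 and #3 only assert that construction and the counter call do not raise. A self-contained sketch of the same calls outside a test class; the import path is an assumption, since the metrics module has moved between TF versions:

# Import path varies across TF versions; this is an assumption.
from tensorflow.lite.python import metrics_portable as metrics

stub = metrics.TFLiteMetrics()
stub.increase_counter_interpreter_creation()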
Example #4
  def __init__(self,
               model_path=None,
               model_content=None,
               experimental_delegates=None,
               num_threads=None,
               experimental_op_resolver_type=OpResolverType.AUTO,
               experimental_preserve_all_tensors=False):
    """Constructor.

    Args:
      model_path: Path to TF-Lite Flatbuffer file.
      model_content: Content of model.
      experimental_delegates: Experimental. Subject to change. List of
        [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
          objects returned by lite.load_delegate().
      num_threads: Sets the number of threads used by the interpreter and
        available to CPU kernels. If not set, the interpreter will use an
        implementation-dependent default number of threads. Currently, only a
        subset of kernels, such as conv, support multi-threading. num_threads
        must be an int >= 1; smaller values (0 to disable multithreading, or
        -1 for an implementation-defined thread count, as in the underlying
        C++ API) are rejected by this Python constructor.
      experimental_op_resolver_type: The op resolver used by the interpreter. It
        must be an instance of OpResolverType. By default, we use the built-in
        op resolver which corresponds to tflite::ops::builtin::BuiltinOpResolver
        in C++.
      experimental_preserve_all_tensors: If true, then intermediate tensors
        used during computation are preserved for inspection. Otherwise, reading
        intermediate tensors provides undefined values.

    Raises:
      ValueError: If the interpreter was unable to be created.
    """
    if not hasattr(self, '_custom_op_registerers'):
      self._custom_op_registerers = []

    op_resolver_id = _get_op_resolver_id(experimental_op_resolver_type)
    if op_resolver_id is None:
      raise ValueError('Unrecognized op resolver type: {}'.format(
          experimental_op_resolver_type))

    if model_path and not model_content:
      custom_op_registerers_by_name = [
          x for x in self._custom_op_registerers if isinstance(x, str)
      ]
      custom_op_registerers_by_func = [
          x for x in self._custom_op_registerers if not isinstance(x, str)
      ]
      self._interpreter = (
          _interpreter_wrapper.CreateWrapperFromFile(
              model_path, op_resolver_id, custom_op_registerers_by_name,
              custom_op_registerers_by_func,
              experimental_preserve_all_tensors))
      if not self._interpreter:
        raise ValueError('Failed to open {}'.format(model_path))
    elif model_content and not model_path:
      custom_op_registerers_by_name = [
          x for x in self._custom_op_registerers if isinstance(x, str)
      ]
      custom_op_registerers_by_func = [
          x for x in self._custom_op_registerers if not isinstance(x, str)
      ]
      # Take a reference, so the pointer remains valid.
      # Since Python strings are immutable, the PyString_XX functions
      # will always return the same pointer.
      self._model_content = model_content
      self._interpreter = (
          _interpreter_wrapper.CreateWrapperFromBuffer(
              model_content, op_resolver_id, custom_op_registerers_by_name,
              custom_op_registerers_by_func,
              experimental_preserve_all_tensors))
    elif not model_content and not model_path:
      raise ValueError('`model_path` or `model_content` must be specified.')
    else:
      raise ValueError("Can't provide both `model_path` and `model_content`.")

    if num_threads is not None:
      if not isinstance(num_threads, int):
        raise ValueError('num_threads should be an int')
      if num_threads < 1:
        raise ValueError('num_threads should be >= 1')
      self._interpreter.SetNumThreads(num_threads)

    # Each delegate is a wrapper that owns the delegates that have been loaded
    # as plugins. The interpreter wrapper will be using them, but we need to
    # hold them in a list so that the lifetime is preserved at least as long as
    # the interpreter wrapper.
    self._delegates = []
    if experimental_delegates:
      self._delegates = experimental_delegates
      for delegate in self._delegates:
        self._interpreter.ModifyGraphWithDelegate(
            delegate._get_native_delegate_pointer())  # pylint: disable=protected-access
    self._signature_defs = self.get_signature_list()

    self._metrics = metrics.TFLiteMetrics()
    self._metrics.increase_counter_interpreter_creation()
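
Example #4's constructor is the public entry point for on-device inference in Python. A minimal usage sketch, assuming a valid TFLite flatbuffer at the placeholder path 'model.tflite':

import numpy as np
import tensorflow as tf

# 'model.tflite' is a placeholder path; any valid TFLite flatbuffer works.
interpreter = tf.lite.Interpreter(model_path='model.tflite', num_threads=2)
interpreter.allocate_tensors()

# Feed a zero tensor matching the first input's declared shape and dtype.
input_details = interpreter.get_input_details()
interpreter.set_tensor(
    input_details[0]['index'],
    np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype']))
interpreter.invoke()
output = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])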
Example #5
    def test_converter_params_set_success(self):
        stub = metrics.TFLiteMetrics()
        stub.set_converter_param('name', 'value')
Example #6
    def test_converter_success_counter_increase_success(self):
        stub = metrics.TFLiteMetrics()
        stub.increase_counter_converter_success()
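
Examples #5 and #6 exercise the converter-side metrics. Combined into one sketch; only the method names shown in the tests are used, and the param key/value are the illustrative ones from the test:

# Import path varies across TF versions (assumption; see the note after Example #3).
from tensorflow.lite.python import metrics_portable as metrics

stub = metrics.TFLiteMetrics()
stub.set_converter_param('name', 'value')  # key/value taken from the test above
stub.increase_counter_converter_success()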
Example #7
    def __init__(
            self,
            quant_debug_model_path: Optional[str] = None,
            quant_debug_model_content: Optional[bytes] = None,
            float_model_path: Optional[str] = None,
            float_model_content: Optional[bytes] = None,
            debug_dataset: Optional[Callable[
                [], Iterable[Sequence[np.ndarray]]]] = None,
            debug_options: Optional[QuantizationDebugOptions] = None) -> None:
        """Runs the TFLite debugging model with given debug options.

    Args:
      quant_debug_model_path: Path to the quantized debug TFLite model file.
      quant_debug_model_content: Content of the quantized debug TFLite model.
      float_model_path: Path to float TFLite model file.
      float_model_content: Content of the float TFLite model.
      debug_dataset: a factory function that returns dataset generator which is
        used to generate input samples (list of np.ndarray) for the model. The
        generated elements must have same types and shape as inputs to the
        model.
      debug_options: Debug options to debug the given model.

    Raises:
      ValueError: If the debugger was unable to be created.

    Attributes:
      layer_statistics: results of error metrics for each NumericVerify op
        results. in {layer_name: {metric_name: metric}} format.
      model_statistics: results of error metrics for difference between float
        and quantized models. in {metric_name: metric} format.
    """
        self._data_gen = debug_dataset
        self._debug_options = debug_options or QuantizationDebugOptions()

        input_data = next(iter(self._data_gen()))
        self._quant_interpreter = tf.lite.Interpreter(
            quant_debug_model_path, quant_debug_model_content)
        if self._debug_options.model_debug_metrics:
            self._float_interpreter = tf.lite.Interpreter(
                float_model_path, float_model_content)

        # TODO(b/177749613) : Fix the dependency on tf.lite._get_ops_details()
        # The following code is needed to get the op's name from the output
        # tensor index, since the NumericVerify op only provides its quantized
        # input tensor index.
        self._defining_op = dict()
        for op_info in self._quant_interpreter._get_ops_details():  # pylint: disable=protected-access
            self._defining_op.update({
                tensor_idx: op_info['op_name']
                for tensor_idx in op_info['outputs']
            })

        self._numeric_verify_tensor_details = None
        if not self._get_numeric_verify_tensor_details():
            raise ValueError(
                'Please check if the quantized model is in debug mode')

        self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy()
        if self._debug_options.layer_debug_metrics:
            self._layer_debug_metrics.update(
                self._debug_options.layer_debug_metrics)

        self.layer_statistics = None
        self.model_statistics = None

        self._metrics = metrics_stub.TFLiteMetrics()
        self._metrics.increase_counter_debugger_creation()
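
Example #7 is the debugger's constructor; driving it end to end looks roughly like the sketch below. It assumes the public tf.lite.experimental.QuantizationDebugger wrapper (available in recent TF releases) and a hypothetical random-input generator; real calibration data should match the model's input signature.

import numpy as np
import tensorflow as tf

def debug_dataset():
    # Hypothetical generator: 8 random samples shaped like a 224x224 RGB input.
    for _ in range(8):
        yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

# Assumption: the file holds a quantized model converted in debug mode
# (i.e., with NumericVerify ops present); producing it is out of scope here.
quant_debug_model = open('quant_debug_model.tflite', 'rb').read()  # placeholder path

debugger = tf.lite.experimental.QuantizationDebugger(
    quant_debug_model_content=quant_debug_model,
    debug_dataset=debug_dataset)
debugger.run()
print(debugger.layer_statistics)  # {layer_name: {metric_name: metric}}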
Example #8
    def __init__(self,
                 model_path=None,
                 model_content=None,
                 experimental_delegates=None,
                 num_threads=None):
        """Constructor.

    Args:
      model_path: Path to TF-Lite Flatbuffer file.
      model_content: Content of model.
      experimental_delegates: Experimental. Subject to change. List of
        [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
          objects returned by lite.load_delegate().
      num_threads: Sets the number of threads used by the interpreter and
        available to CPU kernels. If not set, the interpreter will use an
        implementation-dependent default number of threads. Currently, only a
        subset of kernels, such as conv, support multi-threading.

    Raises:
      ValueError: If the interpreter was unable to create.
    """
        if not hasattr(self, '_custom_op_registerers'):
            self._custom_op_registerers = []
        if model_path and not model_content:
            custom_op_registerers_by_name = [
                x for x in self._custom_op_registerers if isinstance(x, str)
            ]
            custom_op_registerers_by_func = [
                x for x in self._custom_op_registerers
                if not isinstance(x, str)
            ]
            self._interpreter = (_interpreter_wrapper.CreateWrapperFromFile(
                model_path, custom_op_registerers_by_name,
                custom_op_registerers_by_func))
            if not self._interpreter:
                raise ValueError('Failed to open {}'.format(model_path))
        elif model_content and not model_path:
            custom_op_registerers_by_name = [
                x for x in self._custom_op_registerers if isinstance(x, str)
            ]
            custom_op_registerers_by_func = [
                x for x in self._custom_op_registerers
                if not isinstance(x, str)
            ]
            # Take a reference, so the pointer remains valid.
            # Since Python strings are immutable, the PyString_XX functions
            # will always return the same pointer.
            self._model_content = model_content
            self._interpreter = (_interpreter_wrapper.CreateWrapperFromBuffer(
                model_content, custom_op_registerers_by_name,
                custom_op_registerers_by_func))
        elif not model_content and not model_path:
            raise ValueError(
                '`model_path` or `model_content` must be specified.')
        else:
            raise ValueError(
                "Can't provide both `model_path` and `model_content`.")

        if num_threads is not None:
            if not isinstance(num_threads, int):
                raise ValueError('num_threads should be an int')
            if num_threads < 1:
                raise ValueError('num_threads should be >= 1')
            self._interpreter.SetNumThreads(num_threads)

        # Each delegate is a wrapper that owns the delegates that have been loaded
        # as plugins. The interpreter wrapper will be using them, but we need to
        # hold them in a list so that the lifetime is preserved at least as long as
        # the interpreter wrapper.
        self._delegates = []
        if experimental_delegates:
            self._delegates = experimental_delegates
            for delegate in self._delegates:
                self._interpreter.ModifyGraphWithDelegate(
                    delegate._get_native_delegate_pointer())  # pylint: disable=protected-access
        self._signature_defs = self.get_signature_list()

        self._metrics = metrics.TFLiteMetrics()
        self._metrics.increase_counter_interpreter_creation()
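
Example #8 (an older revision of the same constructor) also accepts experimental_delegates. A sketch of the delegate path, assuming a platform-specific delegate shared library ('libedgetpu.so.1' is an example name, not guaranteed to be present on your system):

import tensorflow as tf

# The delegate library name is platform-specific; this one is an example.
delegate = tf.lite.experimental.load_delegate('libedgetpu.so.1')
interpreter = tf.lite.Interpreter(
    model_path='model.tflite',  # placeholder path
    experimental_delegates=[delegate])
interpreter.allocate_tensors()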