Example #1
    def convert(self):
        """Converts a TensorFlow GraphDef based on instance variables.

        Returns:
          The converted data in serialized format. Either a TFLite Flatbuffer or a
          Graphviz graph depending on the value of `output_format`.

        Raises:
          ValueError:
            Input shape is not specified.
            None value for dimension in input_tensor.
        """
        # Checks dimensions in input tensor.
        if self._has_valid_tensors():
            for tensor in self._input_tensors:
                shape = tensor.get_shape()
                if not shape:
                    raise ValueError("Provide an input shape for input array "
                                     "'{0}'.".format(_tensor_name(tensor)))
                # Note that shape_list might be empty for scalar shapes.
                shape_list = shape.as_list()
                if None in shape_list[1:]:
                    raise ValueError(
                        "None is only supported in the 1st dimension. Tensor '{0}' has "
                        "invalid shape '{1}'.".format(_tensor_name(tensor),
                                                      shape_list))
                elif shape_list and shape_list[0] is None:
                    self._set_batch_size(batch_size=1)

        # Get quantization stats. Ensures there is one stat per name if the stats
        # are specified.
        if self.quantized_input_stats:
            quantized_stats = []
            invalid_stats = []
            for name in self.get_input_arrays():
                if name in self.quantized_input_stats:
                    quantized_stats.append(self.quantized_input_stats[name])
                else:
                    invalid_stats.append(name)

            if invalid_stats:
                raise ValueError(
                    "Quantization input stats are not available for input "
                    "tensors '{0}'.".format(",".join(invalid_stats)))
        else:
            quantized_stats = None
        if self.representative_dataset:
            if not isinstance(self.representative_dataset,
                              RepresentativeDataset):
                raise TypeError(
                    "representative_dataset must be an instance of "
                    "RepresentativeDataset")
            if self.representative_dataset.input_gen is None:
                raise ValueError(
                    "Provide an input generator for representative_dataset")

        # TODO(shashishekhar): For now the order of optimizations is ignored.
        # Both size and latency optimizations decide whether to apply post
        # training optimizations.
        post_training_optimize = bool(
            len(
                set(self.optimizations) & set([
                    Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE
                ])))
        # Do weights only quantization if there is no dataset for calibration.
        weights_only_quantize_flag = (post_training_optimize and
                                      (self.representative_dataset is None))

        converter_kwargs = {
            "inference_type": self.inference_type,
            "inference_input_type": self.inference_input_type,
            "input_format": constants.TENSORFLOW_GRAPHDEF,
            "output_format": self.output_format,
            "quantized_input_stats": quantized_stats,
            "default_ranges_stats": self.default_ranges_stats,
            "drop_control_dependency": self.drop_control_dependency,
            "reorder_across_fake_quant": self.reorder_across_fake_quant,
            "change_concat_input_ranges": self.change_concat_input_ranges,
            "allow_custom_ops": self.allow_custom_ops,
            "post_training_quantize": weights_only_quantize_flag,
            "target_ops": self.target_ops,
            "dump_graphviz_dir": self.dump_graphviz_dir,
            "dump_graphviz_video": self.dump_graphviz_video
        }

        optimized_graph = None
        if self.inference_type == constants.QUANTIZED_UINT8:
            optimized_graph = self._graph_def
        else:
            try:
                optimized_graph = _run_graph_optimizations(
                    self._graph_def, self._input_tensors, self._output_tensors)
            except Exception:
                optimized_graph = self._graph_def

        # Converts model.
        if self._has_valid_tensors():
            result = _toco_convert_impl(input_data=optimized_graph,
                                        input_tensors=self._input_tensors,
                                        output_tensors=self._output_tensors,
                                        **converter_kwargs)
        else:
            result = _toco_convert_graph_def(
                input_data=optimized_graph,
                input_arrays_with_shape=self._input_arrays_with_shape,
                output_arrays=self._output_arrays,
                **converter_kwargs)

        if self.representative_dataset and post_training_optimize:
            calibrate_quantize = _calibrator.Calibrator(result)
            result = calibrate_quantize.calibrate_and_quantize(
                self.representative_dataset.input_gen)

        return result
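
Usage note: a minimal sketch of how this `convert()` is typically reached through the public TF 1.x API; the file path and array names below are placeholders, not values from the source above.

import tensorflow as tf

# Build a converter from a frozen GraphDef; input/output array names depend
# on the model being converted and are hypothetical here.
converter = tf.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file="frozen_graph.pb",  # placeholder path
    input_arrays=["input"],
    output_arrays=["output"])

# convert() runs the shape checks, optional Grappler pass, and TOCO
# conversion shown above, returning a serialized TFLite model.
tflite_model = converter.convert()
with open("model.tflite", "wb") as f:
    f.write(tflite_model)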
Example #2
    def convert(self):
        """Converts a TensorFlow GraphDef based on instance variables.

        Returns:
          The converted data in serialized format. Either a TFLite Flatbuffer or a
          Graphviz graph depending on the value of `output_format`.

        Raises:
          ValueError:
            Input shape is not specified.
            None value for dimension in input_tensor.
        """
        # Checks dimensions in input tensor.
        if self._has_valid_tensors():
            for tensor in self._input_tensors:
                shape = tensor.shape
                if not shape:
                    raise ValueError("Provide an input shape for input array "
                                     "'{0}'.".format(_get_tensor_name(tensor)))
                # Note that shape_list might be empty for scalar shapes.
                shape_list = shape.as_list()
                if None in shape_list[1:]:
                    raise ValueError(
                        "None is only supported in the 1st dimension. Tensor '{0}' has "
                        "invalid shape '{1}'.".format(_get_tensor_name(tensor),
                                                      shape_list))
                elif shape_list and shape_list[0] is None:
                    self._set_batch_size(batch_size=1)

        # Get quantization stats. Ensures there is one stat per name if the stats
        # are specified.
        if self.quantized_input_stats:
            quantized_stats = []
            invalid_stats = []
            for name in self.get_input_arrays():
                if name in self.quantized_input_stats:
                    quantized_stats.append(self.quantized_input_stats[name])
                else:
                    invalid_stats.append(name)

            if invalid_stats:
                raise ValueError(
                    "Quantization input stats are not available for input "
                    "tensors '{0}'.".format(",".join(invalid_stats)))
        else:
            quantized_stats = None

        self._validate_quantization()
        self._validate_representative_dataset()

        toco_inference_input_type = self.inference_input_type
        inference_input_type = self.inference_input_type
        inference_output_type = self.inference_output_type
        post_training_optimize = self._is_post_training_optimize()
        if post_training_optimize:
            # Post training optimizations require that TOCO outputs a float model.
            if self.inference_type != constants.FLOAT:
                raise ValueError(
                    "`optimizations` require that `inference_type` is set to float."
                )
            toco_inference_input_type = constants.FLOAT
            # Set up default values.
            if inference_input_type is None:
                inference_input_type = constants.FLOAT
            if inference_output_type is None:
                inference_output_type = constants.FLOAT

        weight_only_quantize = self._is_int8_weight_only_quantize()
        if weight_only_quantize:
            # Currently, weight only quantization requires float inputs and outputs.
            if (inference_input_type != constants.FLOAT
                    or inference_output_type != constants.FLOAT):
                raise ValueError(
                    "Provide an inference_input_type and inference_output_type of type "
                    "tf.float32.")

        if not post_training_optimize and self.inference_output_type is not None:
            raise ValueError(
                "inference_output_type is currently not supported if optimizations "
                "are not enabled.")

        optimized_graph = self._graph_def
        if self.inference_type != constants.QUANTIZED_UINT8:
            try:
                optimized_graph = _run_graph_optimizations(
                    self._graph_def,
                    self._input_tensors,
                    self._output_tensors,
                    config=self._grappler_config())
            except Exception:
                optimized_graph = self._graph_def

        self._debug_info = _get_debug_info(self._debug_info_func,
                                           optimized_graph)

        converter_kwargs = self._get_base_converter_args()
        converter_kwargs.update({
            "inference_type": self.inference_type,
            "inference_input_type": toco_inference_input_type,
            "output_format": self.output_format,
            "quantized_input_stats": quantized_stats,
            "default_ranges_stats": self.default_ranges_stats,
            "drop_control_dependency": self.drop_control_dependency,
            "reorder_across_fake_quant": self.reorder_across_fake_quant,
            "change_concat_input_ranges": self.change_concat_input_ranges,
            "dump_graphviz_dir": self.dump_graphviz_dir,
            "dump_graphviz_video": self.dump_graphviz_video
        })

        # Converts model.
        if self._has_valid_tensors():
            result = _toco_convert_impl(input_data=optimized_graph,
                                        input_tensors=self._input_tensors,
                                        output_tensors=self._output_tensors,
                                        **converter_kwargs)
        else:
            result = _toco_convert_graph_def(
                input_data=optimized_graph,
                input_arrays_with_shape=self._input_arrays_with_shape,
                output_arrays=self._output_arrays,
                **converter_kwargs)

        if self._is_calibration_quantize():
            result = self._calibrate_quantize_model(result,
                                                    inference_input_type,
                                                    inference_output_type)

        return result
Example #3
    def convert(self):
        """Converts a TensorFlow GraphDef based on instance variables.

        Returns:
          The converted data in serialized format.

        Raises:
          ValueError:
            Input shape is not specified.
            None value for dimension in input_tensor.
        """
        graph_def = _convert_to_constants.convert_variables_to_constants_v2(
            self._func)
        input_tensors = [
            tensor for tensor in self._func.inputs
            if tensor.dtype != _dtypes.resource
        ]
        output_tensors = self._func.outputs

        # Run a Grappler pass.
        graph_def = _run_graph_optimizations(graph_def, input_tensors,
                                             output_tensors, self._func.graph)

        # Checks dimensions in input tensor.
        for tensor in input_tensors:
            # Note that shape_list might be empty for scalar shapes.
            shape_list = tensor.get_shape().as_list()
            if None in shape_list[1:]:
                raise ValueError(
                    "None is only supported in the 1st dimension. Tensor '{0}' has "
                    "invalid shape '{1}'.".format(_tensor_name(tensor),
                                                  shape_list))
            elif shape_list and shape_list[0] is None:
                self._set_batch_size(batch_size=1)

        if self.representative_dataset:
            if not isinstance(self.representative_dataset,
                              RepresentativeDataset):
                raise TypeError(
                    "representative_dataset must be an instance of "
                    "RepresentativeDataset")
            if self.representative_dataset.input_gen is None:
                raise ValueError(
                    "Provide an input generator for representative_dataset")

        # TODO(shashishekhar): For now the order of optimizations is ignored.
        # Both size and latency optimizations decide whether to apply post
        # training optimizations.
        post_training_optimize = bool(
            len(
                set(self.optimizations)
                & set([
                    Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE
                ])))
        # Do weights only quantization if there is no dataset for calibration.
        weights_only_quantize_flag = (post_training_optimize and
                                      (self.representative_dataset is None))

        converter_kwargs = {
            "input_format": constants.TENSORFLOW_GRAPHDEF,
            "allow_custom_ops": self.allow_custom_ops,
            "post_training_quantize": weights_only_quantize_flag,
            "target_ops": self.target_ops,
        }

        # Converts model.
        result = _toco_convert_impl(input_data=graph_def,
                                    input_tensors=input_tensors,
                                    output_tensors=output_tensors,
                                    **converter_kwargs)

        if self.representative_dataset and post_training_optimize:
            calibrate_quantize = _calibrator.Calibrator(result)
            result = calibrate_quantize.calibrate_and_quantize(
                self.representative_dataset.input_gen)

        return result
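
Usage note: a minimal sketch for the TF 2.x ConcreteFunction path above; the `tf.function` and its signature are illustrative, not part of the source.

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=[1, 4], dtype=tf.float32)])
def add_one(x):
    return x + 1.0

# from_concrete_functions hands the function to a converter whose convert()
# freezes variables and runs the Grappler pass shown above.
converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [add_one.get_concrete_function()])
tflite_model = converter.convert()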
Example #4
File: lite.py Project: ziyin-dl/tensorflow
    def convert(self):
        """Converts a TensorFlow GraphDef based on instance variables.

        Returns:
          The converted data in serialized format. Either a TFLite Flatbuffer or a
          Graphviz graph depending on the value of `output_format`.

        Raises:
          ValueError:
            Input shape is not specified.
            None value for dimension in input_tensor.
        """
        # Checks dimensions in input tensor.
        if self._has_valid_tensors():
            for tensor in self._input_tensors:
                shape = tensor.get_shape()
                if not shape or not shape.as_list():
                    raise ValueError("Provide an input shape for input array "
                                     "'{0}'.".format(_tensor_name(tensor)))
                shape_list = shape.as_list()
                if None in shape_list[1:]:
                    raise ValueError(
                        "None is only supported in the 1st dimension. Tensor '{0}' has "
                        "invalid shape '{1}'.".format(_tensor_name(tensor),
                                                      shape_list))
                elif shape_list[0] is None:
                    self._set_batch_size(batch_size=1)

        # Get quantization stats. Ensures there is one stat per name if the stats
        # are specified.
        if self.quantized_input_stats:
            quantized_stats = []
            invalid_stats = []
            for name in self.get_input_arrays():
                if name in self.quantized_input_stats:
                    quantized_stats.append(self.quantized_input_stats[name])
                else:
                    invalid_stats.append(name)

            if invalid_stats:
                raise ValueError(
                    "Quantization input stats are not available for input "
                    "tensors '{0}'.".format(",".join(invalid_stats)))
        else:
            quantized_stats = None

        converter_kwargs = {
            "inference_type": self.inference_type,
            "inference_input_type": self.inference_input_type,
            "input_format": constants.TENSORFLOW_GRAPHDEF,
            "output_format": self.output_format,
            "quantized_input_stats": quantized_stats,
            "default_ranges_stats": self.default_ranges_stats,
            "drop_control_dependency": self.drop_control_dependency,
            "reorder_across_fake_quant": self.reorder_across_fake_quant,
            "change_concat_input_ranges": self.change_concat_input_ranges,
            "allow_custom_ops": self.allow_custom_ops,
            "post_training_quantize": self.post_training_quantize,
            "target_ops": self.target_ops,
            "dump_graphviz_dir": self.dump_graphviz_dir,
            "dump_graphviz_video": self.dump_graphviz_video
        }

        optimized_graph = None
        if self.inference_type == constants.QUANTIZED_UINT8:
            optimized_graph = self._graph_def
        else:
            try:
                optimized_graph = _run_graph_optimizations(
                    self._graph_def, [t.name for t in self._output_tensors])
            except Exception:
                optimized_graph = self._graph_def

        # Converts model.
        if self._has_valid_tensors():
            result = _toco_convert_impl(input_data=optimized_graph,
                                        input_tensors=self._input_tensors,
                                        output_tensors=self._output_tensors,
                                        **converter_kwargs)
        else:
            result = _toco_convert_graph_def(
                input_data=optimized_graph,
                input_arrays_with_shape=self._input_arrays_with_shape,
                output_arrays=self._output_arrays,
                **converter_kwargs)
        return result
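
Usage note: a hedged sketch of the quantized path checked above (`inference_type == QUANTIZED_UINT8`); the model path, array names, and stats values are placeholders.

import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    "frozen_graph.pb", ["input"], ["output"])  # placeholder model
converter.inference_type = tf.uint8  # i.e. constants.QUANTIZED_UINT8
# One (mean, std_dev) pair per input array; the validation loop above
# rejects any input array missing from this dict.
converter.quantized_input_stats = {"input": (127.5, 127.5)}
converter.default_ranges_stats = (0, 6)  # fallback ranges for ops without min/max
tflite_model = converter.convert()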
Example #5
    def convert(self):
        """Converts a TensorFlow GraphDef based on instance variables.

        Returns:
          The converted data in serialized format.

        Raises:
          ValueError:
            Multiple concrete functions are specified.
            Input shape is not specified.
            Invalid quantization parameters.
        """
        # TODO(b/130297984): Add support for converting multiple functions.
        if len(self._funcs) != 1:
            raise ValueError(
                "This converter can only convert a single "
                "ConcreteFunction. Converting multiple functions is "
                "under development.")

        frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
            self._funcs[0], lower_control_flow=False)
        input_tensors = [
            tensor for tensor in frozen_func.inputs
            if tensor.dtype != _dtypes.resource
        ]
        output_tensors = frozen_func.outputs

        # Run a Grappler pass.
        graph_def = frozen_func.graph.as_graph_def()
        graph_def = _run_graph_optimizations(graph_def,
                                             input_tensors,
                                             output_tensors,
                                             config=self._grappler_config(),
                                             graph=frozen_func.graph)

        # Checks dimensions in input tensor.
        for tensor in input_tensors:
            # Note that shape_list might be empty for scalar shapes.
            shape_list = tensor.shape.as_list()
            if None in shape_list[1:]:
                raise ValueError(
                    "None is only supported in the 1st dimension. Tensor '{0}' has "
                    "invalid shape '{1}'.".format(_get_tensor_name(tensor),
                                                  shape_list))
            elif shape_list and shape_list[0] is None:
                # Set the batch size to 1 if undefined.
                shape = tensor.shape.as_list()
                shape[0] = 1
                tensor.set_shape(shape)

        self._validate_quantization()
        self._validate_representative_dataset()
        self._debug_info = _get_debug_info(
            _build_debug_info_func(self._funcs[0].graph), graph_def)
        converter_kwargs = self._get_base_converter_args()

        # Converts model.
        result = _toco_convert_impl(input_data=graph_def,
                                    input_tensors=input_tensors,
                                    output_tensors=output_tensors,
                                    **converter_kwargs)

        if self._is_calibration_quantize():
            result = self._calibrate_quantize_model(result, constants.FLOAT,
                                                    constants.FLOAT)

        return result
Example #6
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # TODO(b/130297984): Add support for converting multiple functions.
    self._target_ops = self.target_spec.supported_ops
    if len(self._funcs) != 1:
      raise ValueError("This converter can only convert a single "
                       "ConcreteFunction. Converting multiple functions is "
                       "under development.")

    frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
        self._funcs[0])
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    graph_def = frozen_func.graph.as_graph_def()
    graph_def = _run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=self._grappler_config(),
        graph=frozen_func.graph)

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
      # Note that shape_list might be empty for scalar shapes.
      shape_list = tensor.shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        # Set the batch size to 1 if undefined.
        shape = tensor.shape.as_list()
        shape[0] = 1
        tensor.set_shape(shape)

    self._validate_representative_dataset()

    converter_kwargs = {
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": self._is_weight_only_quantize(),
        "target_ops": self.target_spec.supported_ops,
    }

    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)

    if self._is_calibration_quantize():
      result = self._calibrate_quantize_model(result, constants.FLOAT,
                                              constants.FLOAT)

    return result
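
Usage note: `self._target_ops` above is read from `target_spec.supported_ops`; a sketch of enabling TensorFlow select ops (the saved-model path is a placeholder). Note that the flex-only Grappler behavior seen elsewhere in this file triggers only when SELECT_TF_OPS is the sole entry.

import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model("saved_model_dir")  # placeholder
# Let ops without a TFLite builtin kernel fall back to TensorFlow kernels.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,
    tf.lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()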
Example #7
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on the value of `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    self._target_ops = self.target_ops
    # Checks dimensions in input tensor.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None

    self._validate_representative_dataset()

    toco_inference_input_type = self.inference_input_type
    inference_input_type = self.inference_input_type
    inference_output_type = self.inference_output_type
    post_training_optimize = self._is_post_training_optimize()
    if post_training_optimize:
      # Post training optimizations require that TOCO outputs a float model.
      if self.inference_type != constants.FLOAT:
        raise ValueError(
            "`optimizations` require that `inference_type` is set to float.")
      toco_inference_input_type = constants.FLOAT
      # Set up default values.
      if inference_input_type is None:
        inference_input_type = constants.FLOAT
      if inference_output_type is None:
        inference_output_type = constants.FLOAT

    weight_only_quantize = self._is_weight_only_quantize()
    if weight_only_quantize:
      # Currently, weight only quantization requires float inputs and outputs.
      if (inference_input_type != constants.FLOAT or
          inference_output_type != constants.FLOAT):
        raise ValueError(
            "Provide an inference_input_type and inference_output_type of type "
            "tf.float32.")

    if not post_training_optimize and self.inference_output_type is not None:
      raise ValueError(
          "inference_output_type is currently not supported if optimizations "
          "are not enabled.")

    converter_kwargs = {
        "inference_type": self.inference_type,
        "inference_input_type": toco_inference_input_type,
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": weight_only_quantize,
        "target_ops": self.target_ops,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video
    }

    optimized_graph = self._graph_def
    if self.inference_type != constants.QUANTIZED_UINT8:
      try:
        optimized_graph = _run_graph_optimizations(
            self._graph_def,
            self._input_tensors,
            self._output_tensors,
            config=self._grappler_config())
      except Exception:
        optimized_graph = self._graph_def

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)

    if self._is_calibration_quantize():
      result = self._calibrate_quantize_model(result, inference_input_type,
                                              inference_output_type)

    return result
Example #8
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on the value of `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    # Checks dimensions in input tensor.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None
    if self.representative_dataset:
      if not isinstance(self.representative_dataset, RepresentativeDataset):
        raise TypeError(
            "representative_dataset must be an instance of "
            "RepresentativeDataset")
      if self.representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for representative_dataset")

    post_training_optimize = bool(
        len(set(self.optimizations) & set([Optimize.OPTIMIZE_FOR_LATENCY,
                                           Optimize.OPTIMIZE_FOR_SIZE])))
    # Do weights only quantization if there is no dataset for calibration.
    weights_only_quantize_flag = (
        post_training_optimize and (self.representative_dataset is None))

    toco_inference_input_type = self.inference_input_type
    inference_input_type = self.inference_input_type
    inference_output_type = self.inference_output_type
    if post_training_optimize:
      # Post training optimizations require that TOCO outputs a float model.
      if self.inference_type != constants.FLOAT:
        raise ValueError(
            "`optimizations` require that `inference_type` is set to float.")
      toco_inference_input_type = constants.FLOAT
      # Set up default values.
      if inference_input_type is None:
        inference_input_type = constants.FLOAT
      if inference_output_type is None:
        inference_output_type = constants.FLOAT

    if weights_only_quantize_flag:
      # Currently, weight only quantization requires float inputs and outputs.
      if (inference_input_type != constants.FLOAT or
          inference_output_type != constants.FLOAT):
        raise ValueError(
            "Provide an inference_input_type and inference_output_type of type "
            "tf.float32.")

    if not post_training_optimize and self.inference_output_type is not None:
      raise ValueError(
          "inference_output_type is currently not supported if optimizations "
          "are not enabled.")

    converter_kwargs = {
        "inference_type": self.inference_type,
        "inference_input_type": toco_inference_input_type,
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": weights_only_quantize_flag,
        "target_ops": self.target_ops,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video
    }

    optimized_graph = None
    if self.inference_type == constants.QUANTIZED_UINT8:
      optimized_graph = self._graph_def
    else:
      try:
        is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == self.target_ops
        config = _get_grappler_config(
            enable_layout_optimizer=is_only_flex_enabled)
        optimized_graph = _run_graph_optimizations(
            self._graph_def, self._input_tensors, self._output_tensors, config)
      except Exception:
        optimized_graph = self._graph_def

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)

    if self.representative_dataset and post_training_optimize:
      calibrate_quantize = _calibrator.Calibrator(result)
      result = calibrate_quantize.calibrate_and_quantize(
          self.representative_dataset.input_gen, inference_input_type,
          inference_output_type)

    return result
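
Usage note: the calibration branch at the end fires only when both `optimizations` and `representative_dataset` are set; a sketch of that configuration, with a stand-in generator and input shape.

import numpy as np
import tensorflow as tf

def representative_data_gen():
    # One calibration sample per yield; the shape must match the model input.
    for _ in range(100):
        yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model("saved_model_dir")  # placeholder
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
# Wrapping in RepresentativeDataset satisfies the isinstance check above.
converter.representative_dataset = tf.lite.RepresentativeDataset(
    representative_data_gen)
# With a dataset present, convert() calibrates and fully quantizes instead
# of doing weights-only quantization.
tflite_model = converter.convert()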
Example #9
File: lite.py Project: Wajih-O/tensorflow
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on the value of `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    # Checks dimensions in input tensor.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        shape = tensor.get_shape()
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None

    converter_kwargs = {
        "inference_type": self.inference_type,
        "inference_input_type": self.inference_input_type,
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": self.post_training_quantize,
        "target_ops": self.target_ops,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video
    }

    optimized_graph = None
    if self.inference_type == constants.QUANTIZED_UINT8:
      optimized_graph = self._graph_def
    else:
      try:
        optimized_graph = _run_graph_optimizations(
            self._graph_def, self._input_tensors, self._output_tensors)
      except Exception:
        optimized_graph = self._graph_def

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)
    return result
Example #10
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # TODO(b/130297984): Add support for converting multiple functions.
    if len(self._funcs) != 1:
      raise ValueError("This converter can only convert a single "
                       "ConcreteFunction. Converting multiple functions is "
                       "under development.")

    frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
        self._funcs[0])
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    is_only_flex_enabled = set(
        [OpsSet.SELECT_TF_OPS]) == self.target_spec.supported_ops
    config = _get_grappler_config(enable_layout_optimizer=is_only_flex_enabled)
    graph_def = _run_graph_optimizations(
        frozen_func.graph.as_graph_def(),
        input_tensors,
        output_tensors,
        config,
        graph=frozen_func.graph)

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
      # Note that shape_list might be empty for scalar shapes.
      shape_list = tensor.shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        # Set the batch size to 1 if undefined.
        shape = tensor.shape.as_list()
        shape[0] = 1
        tensor.set_shape(shape)

    if self.representative_dataset:
      if not isinstance(self.representative_dataset, RepresentativeDataset):
        raise TypeError("`representative_dataset` must be an instance of "
                        "`RepresentativeDataset`")
      if self.representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for `representative_dataset`")

    # TODO(shashishekhar): For now the order of optimizations is ignored.
    # Both size and latency optimizations decide whether to apply post
    # training optimizations.
    post_training_optimize = bool(
        len(
            set(self.optimizations)
            & set([Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE])))
    # Do weights only quantization if there is no dataset for calibration.
    weights_only_quantize_flag = (
        post_training_optimize and (self.representative_dataset is None))

    converter_kwargs = {
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": weights_only_quantize_flag,
        "target_ops": self.target_spec.supported_ops,
    }

    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)

    if self.representative_dataset and post_training_optimize:
      calibrate_quantize = _calibrator.Calibrator(result)
      result = calibrate_quantize.calibrate_and_quantize(
          self.representative_dataset.input_gen)

    return result
Example #11
File: lite.py Project: Harryi0/tinyML
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # TODO(b/130297984): Add support for converting multiple functions.
    if len(self._funcs) != 1:
      raise ValueError("This converter can only convert a single "
                       "ConcreteFunction. Converting multiple functions is "
                       "under development.")

    # graph_def is used here to preserve the node debug information
    frozen_func, graph_def = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False))
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    graph_def = _run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=self._grappler_config(),
        graph=frozen_func.graph)

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
      # Note that shape_list might be empty for scalar shapes.
      shape_list = tensor.shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        # Set the batch size to 1 if undefined.
        shape = tensor.shape.as_list()
        shape[0] = 1
        tensor.set_shape(shape)

    self._validate_quantization()
    self._validate_representative_dataset()
    if self._trackable_obj is None:
      self._debug_info = _get_debug_info(
          _build_debug_info_func(self._funcs[0].graph), graph_def)
    else:
      self._debug_info = _get_debug_info(
          _convert_debug_info_func(self._trackable_obj.graph_debug_info),
          graph_def)

    converter_kwargs = self._get_base_converter_args()

    if not self.experimental_new_converter:
      logging.warning(
          "Please consider switching to use new converter by setting "
          "experimental_new_converter to true. "
          "Old converter (TOCO) is deprecated and flow will be switched on "
          "by default to use new converter soon.")
    else:
      logging.info("Using experimental converter: If you encountered a problem "
                   "please file a bug. You can opt-out "
                   "by setting experimental_new_converter=False")

    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)

    if self._is_calibration_quantize():
      result = self._calibrate_quantize_model(
          result, constants.FLOAT, constants.FLOAT,
          self.experimental_new_quantizer)

    return result
Example #12
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on the value of `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    # Checks dimensions in input tensor.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None

    self._validate_quantization()
    self._validate_representative_dataset()

    toco_inference_input_type = self.inference_input_type
    inference_input_type = self.inference_input_type
    inference_output_type = self.inference_output_type
    post_training_optimize = self._is_post_training_optimize()
    if post_training_optimize:
      # Post training optimizations require that TOCO outputs a float model.
      if self.inference_type != constants.FLOAT:
        raise ValueError(
            "`optimizations` require that `inference_type` is set to float.")
      toco_inference_input_type = constants.FLOAT
      # Set up default values.
      if inference_input_type is None:
        inference_input_type = constants.FLOAT
      if inference_output_type is None:
        inference_output_type = constants.FLOAT

    weight_only_quantize = self._is_int8_weight_only_quantize()
    if weight_only_quantize:
      # Currently, weight only quantization requires float inputs and outputs.
      if (inference_input_type != constants.FLOAT or
          inference_output_type != constants.FLOAT):
        raise ValueError(
            "Provide an inference_input_type and inference_output_type of type "
            "tf.float32.")

    if not post_training_optimize and self.inference_output_type is not None:
      raise ValueError(
          "inference_output_type is currently not supported if optimizations "
          "are not enabled.")

    optimized_graph = self._graph_def
    # Run graph optimization unless this is quantization-aware training
    # (i.e., uint8 or int8 inference without post-training quantization);
    # graph optimization is disabled for quantization-aware training.
    if (self.inference_type != constants.QUANTIZED_UINT8 or
        (self.inference_type == constants.INT8 and
         (post_training_optimize or weight_only_quantize))):
      try:
        # Run function inlining optimization to ensure any models generated
        # through the from_frozen_graph path have been inlined.
        optimized_graph = _run_graph_optimizations(
            self._graph_def,
            self._input_tensors,
            self._output_tensors,
            config=self._grappler_config(["function"]))
      except Exception:
        optimized_graph = self._graph_def

    self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)

    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update({
        "inference_type": self.inference_type,
        "inference_input_type": toco_inference_input_type,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video,
        "conversion_summary_dir": self.conversion_summary_dir,
        "custom_opdefs": self._custom_opdefs,
    })

    if not self.experimental_new_converter:
      logging.warning(
          "Please consider switching to use new converter by setting "
          "experimental_new_converter to true. "
          "Old converter (TOCO) is deprecated and flow will be switched on "
          "by default to use new converter soon.")
    else:
      logging.info("Using experimental converter: If you encountered a problem "
                   "please file a bug. You can opt-out "
                   "by setting experimental_new_converter=False")

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)

    if self._is_calibration_quantize():
      result = self._calibrate_quantize_model(
          result, inference_input_type, inference_output_type,
          self.experimental_new_quantizer)

    return result
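
Usage note: the warning above is silenced by opting in to the MLIR-based converter; a one-flag sketch (the model is a placeholder).

import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    "frozen_graph.pb", ["input"], ["output"])  # placeholder model
converter.experimental_new_converter = True  # opt in; False falls back to TOCO
tflite_model = converter.convert()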
Example #13
File: lite.py Project: perfmjs/tensorflow
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on the value of `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    # Checks dimensions in input tensor.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None
    if self.representative_dataset:
      if not isinstance(self.representative_dataset, RepresentativeDataset):
        raise TypeError(
            "representative_dataset must be an instance of "
            "RepresentativeDataset")
      if self.representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for representative_dataset")

    # TODO(shashishekhar): For now the order of optimizations is ignored.
    # Both size and latency optimizations decide whether to apply post
    # training optimizations.
    post_training_optimize = bool(
        len(set(self.optimizations) & set([Optimize.OPTIMIZE_FOR_LATENCY,
                                           Optimize.OPTIMIZE_FOR_SIZE])))
    # Do weights only quantization if there is no dataset for calibration.
    weights_only_quantize_flag = (
        post_training_optimize and (self.representative_dataset is None))

    converter_kwargs = {
        "inference_type": self.inference_type,
        "inference_input_type": self.inference_input_type,
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": weights_only_quantize_flag,
        "target_ops": self.target_ops,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video
    }

    optimized_graph = None
    if self.inference_type == constants.QUANTIZED_UINT8:
      optimized_graph = self._graph_def
    else:
      try:
        is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == self.target_ops
        config = _get_grappler_config(
            enable_layout_optimizer=is_only_flex_enabled)
        optimized_graph = _run_graph_optimizations(
            self._graph_def, self._input_tensors, self._output_tensors, config)
      except Exception:
        optimized_graph = self._graph_def

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)

    if self.representative_dataset and post_training_optimize:
      calibrate_quantize = _calibrator.Calibrator(result)
      result = calibrate_quantize.calibrate_and_quantize(
          self.representative_dataset.input_gen)

    return result
Example #14
File: lite.py Project: perfmjs/tensorflow
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
        self._func)
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    is_only_flex_enabled = set(
        [OpsSet.SELECT_TF_OPS]) == self.target_spec.supported_ops
    config = _get_grappler_config(enable_layout_optimizer=is_only_flex_enabled)
    graph_def = _run_graph_optimizations(
        frozen_func.graph.as_graph_def(),
        input_tensors,
        output_tensors,
        config,
        graph=frozen_func.graph)

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
      # Note that shape_list might be empty for scalar shapes.
      shape_list = tensor.shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        # Set the batch size to 1 if undefined.
        shape = tensor.shape.as_list()
        shape[0] = 1
        tensor.set_shape(shape)

    if self.representative_dataset:
      if not isinstance(self.representative_dataset, RepresentativeDataset):
        raise TypeError("`representative_dataset` must be an instance of "
                        "`RepresentativeDataset`")
      if self.representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for `representative_dataset`")

    # TODO(shashishekhar): For now the order of optimizations is ignored.
    # Both size and latency optimizations decide whether to apply post
    # training optimizations.
    post_training_optimize = bool(
        len(
            set(self.optimizations)
            & set([Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE])))
    # Do weights only quantization if there is no dataset for calibration.
    weights_only_quantize_flag = (
        post_training_optimize and (self.representative_dataset is None))

    converter_kwargs = {
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": weights_only_quantize_flag,
        "target_ops": self.target_spec.supported_ops,
    }

    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)

    if self.representative_dataset and post_training_optimize:
      calibrate_quantize = _calibrator.Calibrator(result)
      result = calibrate_quantize.calibrate_and_quantize(
          self.representative_dataset.input_gen)

    return result