Code example #1
File: converter.py Project: yyht/compute-engine
def convert_keras_model(model):
    """Converts a Keras model to TFLite flatbuffer.

    Returns:
      The converted data in serialized format.
    """
    if not tf.executing_eagerly():
        raise RuntimeError(
            "Graph mode is not supported. Please enable eager execution using "
            "tf.enable_eager_execution() when using TensorFlow 1.x")
    func = concrete_function_from_keras_model(model)
    if version.parse(tf.__version__) >= version.parse("1.15"):
        frozen_func = convert_variables_to_constants_v2(
            func, lower_control_flow=False)
    else:
        frozen_func = convert_variables_to_constants_v2(func)
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = frozen_func.graph.as_graph_def()
    # Run constant folding using grappler, since we currently don't implement
    # folding for LCE custom ops.
    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(["constfold"]),
        graph=frozen_func.graph,
    )

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
        # Note that shape_list might be empty for scalar shapes.
        shape_list = tensor.shape.as_list()
        if None in shape_list[1:]:
            raise ValueError(
                "None is only supported in the 1st dimension. Tensor '{0}' has "
                "invalid shape '{1}'.".format(get_tensor_name(tensor),
                                              shape_list))
        elif shape_list and shape_list[0] is None:
            # Set the batch size to 1 if undefined.
            shape = tensor.shape.as_list()
            shape[0] = 1
            tensor.set_shape(shape)

    return convert_graphdef_to_tflite_flatbuffer(
        graph_def.SerializeToString(),
        [get_tensor_name(tensor) for tensor in input_tensors],
        [
            DataType.Name(tensor.dtype.as_datatype_enum)
            for tensor in input_tensors
        ],
        [tensor.shape.as_list() for tensor in input_tensors],
        [get_tensor_name(tensor) for tensor in output_tensors],
    )
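A minimal usage sketch for the converter above; the toy Sequential model and the output path are illustrative assumptions, not part of the project:

import tensorflow as tf

# Hypothetical small Keras model, used only to exercise convert_keras_model().
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="relu", input_shape=(32,)),
    tf.keras.layers.Dense(2),
])

flatbuffer_bytes = convert_keras_model(model)
with open("/tmp/converted_model.tflite", "wb") as f:  # illustrative path
    f.write(flatbuffer_bytes)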
Code example #2
  def testTensorName(self):
    with ops.Graph().as_default():
      in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
      out_tensors = array_ops.split(
          value=in_tensor, num_or_size_splits=[1, 1, 1, 1], axis=0)

    expect_names = ["split", "split:1", "split:2", "split:3"]
    for i in range(len(expect_names)):
      got_name = util.get_tensor_name(out_tensors[i])
      self.assertEqual(got_name, expect_names[i])
Code example #3
File: util_test.py Project: aritratony/tensorflow
  def testTensorName(self):
    in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
    # out_tensors should have names: "split:0", "split:1", "split:2", "split:3".
    out_tensors = array_ops.split(
        value=in_tensor, num_or_size_splits=[1, 1, 1, 1], axis=0)
    expect_names = ["split", "split:1", "split:2", "split:3"]

    for i in range(len(expect_names)):
      got_name = util.get_tensor_name(out_tensors[i])
      self.assertEqual(got_name, expect_names[i])
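Both tests above hinge on the same naming rule: util.get_tensor_name reports the first output of the split op as "split" rather than "split:0", while later outputs keep their ":<index>" suffix. A minimal sketch of that rule (illustrative only, not the actual TensorFlow implementation):

def strip_default_output_suffix(tensor_name):
    # Drop a trailing ":0" (the default output index) but keep explicit
    # indices such as ":1" or ":2", matching the expectations in the tests.
    parts = tensor_name.split(":")
    if len(parts) > 1 and parts[-1] == "0":
        return ":".join(parts[:-1])
    return tensor_name

assert strip_default_output_suffix("split:0") == "split"
assert strip_default_output_suffix("split:1") == "split:1"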
Code example #4
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=dtypes.float32,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None,
                              saved_model_dir=None,
                              saved_model_version=0,
                              saved_model_tags=None,
                              saved_model_exported_names=None,
                              select_user_tf_ops=None):
    """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Data type of numeric arrays, excluding the input layer.
      (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    inference_input_type: Data type of the numeric arrays in the input layer. If
      `inference_input_type` is in {tf.int8, tf.uint8}, then
      `quantized_input_stats` must be provided. (default is the value assigned
      to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
    input_format: Type of data to read.
      (default TENSORFLOW_GRAPHDEF, must be in {TENSORFLOW_GRAPHDEF})
    input_shapes: Input array shape. (default None, must be None or a list of
      the same length as `input_tensors`.)
    output_format: Output file format. (default TFLITE, must be in
      {TFLITE, GRAPHVIZ_DOT})
    quantized_input_stats: Map of input tensor names to a tuple of floats
      representing the mean and standard deviation of the training data.
      (e.g., {"foo" : (0., 1.)}). Required if `inference_input_type` is tf.int8
        or tf.uint8. (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy). (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion logs.
    saved_model_dir: Filepath of the SavedModel to be converted. This value
      is non-empty only when the SavedModel import path is used; otherwise,
      the GraphDef-based conversion is performed.
    saved_model_version: SavedModel file format version of the SavedModel file
      to be converted. This value is set only when the SavedModel import path
      is used.
    saved_model_tags: Set of SavedModel tags, formatted as comma-separated
      values. This value is set only when the SavedModel import path is used.
    saved_model_exported_names: Names to be exported (default: export all).
      This value is set only when the SavedModel import path is used.
    select_user_tf_ops: List of user-defined TensorFlow ops that need to be
      supported in the TensorFlow Lite runtime. These ops will be supported as
      select TensorFlow ops.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
    toco = build_toco_flags(inference_type, inference_input_type, input_format,
                            output_format, default_ranges_stats,
                            drop_control_dependency, reorder_across_fake_quant,
                            allow_custom_ops, post_training_quantize,
                            quantize_to_float16, dump_graphviz_dir,
                            dump_graphviz_video, target_ops,
                            conversion_summary_dir, select_user_tf_ops)
    model = _model_flags_pb2.ModelFlags()
    model.change_concat_input_ranges = change_concat_input_ranges
    for idx, input_tensor in enumerate(input_tensors):
        input_array = model.input_arrays.add()
        if saved_model_dir:
            input_array.name = input_tensor.name
        else:
            input_array.name = util.get_tensor_name(input_tensor)
        input_array.data_type = convert_tensor_tf_type_to_tflite_type(
            input_tensor.dtype, usage="input type of the TensorFlow model")

        if _requires_input_stats(toco) and quantized_input_stats:
            input_array.mean_value, input_array.std_value = quantized_input_stats[
                idx]

        if input_shapes is None:
            shape = input_tensor.shape
        else:
            shape = input_shapes[idx]

        if shape.rank is not None:
            # Create shapes with -1 for unknown dimensions.
            dims = []
            for dim in shape:
                if (dim is None or (isinstance(dim, tensor_shape.Dimension)
                                    and dim.value is None)):
                    dims.append(-1)
                else:
                    dims.append(int(dim))
            input_array.shape.dims.extend(dims)
            input_array.shape.unknown_rank = False
        else:
            input_array.shape.unknown_rank = True

    for output_tensor in output_tensors:
        if saved_model_dir:
            model.output_arrays.append(output_tensor.name)
        else:
            model.output_arrays.append(util.get_tensor_name(output_tensor))

    model.allow_nonexistent_arrays = allow_nonexistent_arrays

    if saved_model_dir:
        model.saved_model_dir = saved_model_dir
    model.saved_model_version = saved_model_version
    if saved_model_tags:
        model.saved_model_tags.extend(saved_model_tags)
    if saved_model_exported_names:
        model.saved_model_exported_names.extend(saved_model_exported_names)

    return model, toco, debug_info
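A hedged usage sketch for build_toco_convert_protos as defined above; the graph, tensor names, and shapes are illustrative, and the placeholders use the tf.compat.v1 graph-mode API:

import tensorflow as tf

with tf.Graph().as_default():
    # Illustrative single-input, single-output graph.
    inp = tf.compat.v1.placeholder(shape=[1, 224, 224, 3], dtype=tf.float32, name="input")
    out = tf.identity(inp, name="output")

    model_flags, toco_flags, debug = build_toco_convert_protos(
        input_tensors=[inp],
        output_tensors=[out],
    )
# model_flags and toco_flags are the protocol buffers handed to the converter backend.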
Code example #5
File: convert.py Project: yuan89623/tensorflow
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              custom_opdefs=None,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None,
                              saved_model_dir=None,
                              saved_model_version=0,
                              saved_model_tags=None,
                              saved_model_exported_names=None):
    """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8, tf.int8}`.  (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization. Must be
      `{tf.float32, tf.uint8, tf.int8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length as
      `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only needed if `inference_input_type` is `QUANTIZED_UINT8` or `INT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    custom_opdefs: List of strings representing custom ops OpDefs that are
      included in the GraphDef. Required when using custom operations with the
      MLIR-based converter. (default None)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy). (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion logs.
    saved_model_dir: Filepath of the SavedModel to be converted. This value
      is non-empty only when the SavedModel import path is used; otherwise,
      the GraphDef-based conversion is performed.
    saved_model_version: SavedModel file format version of the SavedModel file
      to be converted. This value is set only when the SavedModel import path
      is used.
    saved_model_tags: Set of SavedModel tags, formatted as comma-separated
      values. This value is set only when the SavedModel import path is used.
    saved_model_exported_names: Names to be exported (default: export all).
      This value is set only when the SavedModel import path is used.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
    toco = _toco_flags_pb2.TocoFlags()
    toco.input_format = input_format
    toco.output_format = output_format
    toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
    if inference_input_type:
        toco.inference_input_type = util.convert_dtype_to_tflite_type(
            inference_input_type)
    else:
        toco.inference_input_type = toco.inference_type
    toco.drop_control_dependency = drop_control_dependency
    toco.reorder_across_fake_quant = reorder_across_fake_quant
    toco.allow_custom_ops = allow_custom_ops
    if custom_opdefs:
        toco.custom_opdefs.extend(custom_opdefs)
    toco.post_training_quantize = post_training_quantize
    toco.quantize_to_float16 = quantize_to_float16
    if default_ranges_stats:
        toco.default_ranges_min = default_ranges_stats[0]
        toco.default_ranges_max = default_ranges_stats[1]
    if dump_graphviz_dir:
        toco.dump_graphviz_dir = dump_graphviz_dir
    toco.dump_graphviz_include_video = dump_graphviz_video
    if conversion_summary_dir:
        toco.conversion_summary_dir = conversion_summary_dir
    if target_ops:
        if set(target_ops) == set(
            [OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
            toco.enable_select_tf_ops = True
        elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
            toco.enable_select_tf_ops = True
            toco.force_select_tf_ops = True

    model = _model_flags_pb2.ModelFlags()
    model.change_concat_input_ranges = change_concat_input_ranges
    for idx, input_tensor in enumerate(input_tensors):
        input_array = model.input_arrays.add()
        input_array.name = util.get_tensor_name(input_tensor)
        input_array.data_type = util.convert_dtype_to_tflite_type(
            input_tensor.dtype)

        if _requires_input_stats(toco) and quantized_input_stats:
            input_array.mean_value, input_array.std_value = quantized_input_stats[
                idx]

        if input_shapes is None:
            shape = input_tensor.shape
        else:
            shape = input_shapes[idx]

        # Create shapes with -1 for unknown dimensions.
        dims = []
        for dim in shape:
            if (dim is None or (isinstance(dim, tensor_shape.Dimension)
                                and dim.value is None)):
                dims.append(-1)
            else:
                dims.append(int(dim))
        input_array.shape.dims.extend(dims)

    for output_tensor in output_tensors:
        model.output_arrays.append(util.get_tensor_name(output_tensor))

    model.allow_nonexistent_arrays = allow_nonexistent_arrays

    if saved_model_dir:
        model.saved_model_dir = saved_model_dir
    model.saved_model_version = saved_model_version
    if saved_model_tags:
        model.saved_model_tags.extend(saved_model_tags)
    if saved_model_exported_names:
        model.saved_model_exported_names.extend(saved_model_exported_names)

    return model, toco, debug_info
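The target_ops handling above only reacts to two specific sets. A hedged sketch of requesting TFLite builtins plus select TF ops (OpsSet is the same enum the function body references; the graph and tensors are illustrative):

import tensorflow as tf

with tf.Graph().as_default():
    inp = tf.compat.v1.placeholder(shape=[1, 8], dtype=tf.float32, name="in")
    out = tf.identity(inp, name="out")
    model_flags, toco_flags, _ = build_toco_convert_protos(
        [inp], [out],
        target_ops={OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS},
    )
# toco_flags.enable_select_tf_ops is now True; force_select_tf_ops stays False.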
Code example #6
def convert_keras_model(
    model: tf.keras.Model,
    *,  # Require remaining arguments to be keyword-only.
    inference_input_type: tf.DType = tf.float32,
    inference_output_type: tf.DType = tf.float32,
    target: str = "arm",
    experimental_default_int8_range: Optional[Tuple[float, float]] = None,
    experimental_enable_bitpacked_activations: bool = False,
) -> bytes:
    """Converts a Keras model to TFLite flatbuffer.

    !!! example
        ```python
        tflite_model = convert_keras_model(model)
        with open("/tmp/my_model.tflite", "wb") as f:
            f.write(tflite_model)
        ```

    # Arguments
        model: The model to convert.
        inference_input_type: Data type of the input layer. Defaults to `tf.float32`,
            must be either `tf.float32` or `tf.int8`.
        inference_output_type: Data type of the output layer. Defaults to `tf.float32`,
            must be either `tf.float32` or `tf.int8`.
        target: Target hardware platform. Must be "arm" or "xcore".
        experimental_default_int8_range: Tuple of floats representing `(min, max)`
            range values for all arrays without a specified range. Intended for
            experimenting with quantization via "dummy quantization". (default None)
        experimental_enable_bitpacked_activations: Enable an experimental
            converter optimisation that attempts to reduce intermediate
            activation memory usage by bitpacking the activation tensor between
            consecutive binary convolutions where possible.

    # Returns
        The converted data in serialized format.
    """
    if not isinstance(model, tf.keras.Model):
        raise ValueError(
            f"Expected `model` argument to be a `tf.keras.Model` instance, got `{model}`."
        )
    if inference_input_type not in (tf.float32, tf.int8):
        raise ValueError(
            "Expected `inference_input_type` to be either `tf.float32` or `tf.int8`, "
            f"got {inference_input_type}.")
    if inference_output_type not in (tf.float32, tf.int8):
        raise ValueError(
            "Expected `inference_output_type` to be either `tf.float32` or `tf.int8`, "
            f"got {inference_output_type}.")
    if target not in ("arm", "xcore"):
        raise ValueError(
            f'Expected `target` to be "arm" or "xcore", but got {target}.')

    if not tf.executing_eagerly():
        raise RuntimeError(
            "Graph mode is not supported. Please enable eager execution using "
            "tf.enable_eager_execution() when using TensorFlow 1.x")
    if experimental_default_int8_range:
        warnings.warn(
            "Using `experimental_default_int8_range` as fallback quantization stats. "
            "This should only be used for latency tests.")
    if hasattr(model, "dtype_policy") and model.dtype_policy.name != "float32":
        raise RuntimeError(
            "Mixed precision float16 models are not supported by the TFLite converter, "
            "please convert them to float32 first. See also: "
            "https://github.com/tensorflow/tensorflow/issues/46380")
    func = concrete_function_from_keras_model(model)
    if version.parse(tf.__version__) >= version.parse("1.15"):
        frozen_func = convert_variables_to_constants_v2(
            func, lower_control_flow=False)
    else:
        frozen_func = convert_variables_to_constants_v2(func)
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = frozen_func.graph.as_graph_def()
    should_quantize = (_contains_training_quant_op(graph_def)
                       or experimental_default_int8_range is not None)

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
        # Note that shape_list might be empty for scalar shapes.
        shape_list = tensor.shape.as_list()
        if None in shape_list[1:]:
            raise ValueError(
                "None is only supported in the 1st dimension. Tensor '{0}' has "
                "invalid shape '{1}'.".format(get_tensor_name(tensor),
                                              shape_list))
        elif shape_list and shape_list[0] is None:
            # Set the batch size to 1 if undefined.
            shape = tensor.shape.as_list()
            shape[0] = 1
            tensor.set_shape(shape)

    tflite_buffer = convert_graphdef_to_tflite_flatbuffer(
        graph_def.SerializeToString(),
        [get_tensor_name(tensor) for tensor in input_tensors],
        [
            DataType.Name(tensor.dtype.as_datatype_enum)
            for tensor in input_tensors
        ],
        [tensor.shape.as_list() for tensor in input_tensors],
        [get_tensor_name(tensor) for tensor in output_tensors],
        should_quantize,
        target,
        experimental_default_int8_range,
        experimental_enable_bitpacked_activations,
    )
    if should_quantize and (inference_input_type != tf.float32
                            or inference_output_type != tf.float32):
        tflite_buffer = modify_integer_quantized_model_io_type(
            tflite_buffer,
            inference_input_type=inference_input_type,
            inference_output_type=inference_output_type,
        )

    return tflite_buffer
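A hedged follow-up to the docstring example: with a model that contains training-time quantization ops (the qat_model below is a placeholder assumption), should_quantize becomes True and the int8 I/O types take effect:

# qat_model is a hypothetical quantization-aware-trained Keras model.
tflite_model = convert_keras_model(
    qat_model,
    inference_input_type=tf.int8,
    inference_output_type=tf.int8,
)
with open("/tmp/my_model_int8.tflite", "wb") as f:  # illustrative path
    f.write(tflite_model)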
Code example #7
File: convert.py Project: zhuochenKIDD/tensorflow
def convert_graphdef(input_data, input_tensors, output_tensors, **kwargs):
  """Convert a frozen GraphDef model using the TF Lite converter.

  Conversion can be customized by providing arguments that are forwarded to
  `build_model_flags` and `build_conversion_flags` (see documentation).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`).
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    **kwargs: See `build_model_flags` and `build_conversion_flags`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_conversion_flags`.
  """
  model_flags = build_model_flags(**kwargs)
  conversion_flags = build_conversion_flags(**kwargs)
  saved_model_dir = kwargs.get("saved_model_dir", None)
  input_shapes = kwargs.get("input_shapes", None)
  enable_mlir_converter = kwargs.get("enable_mlir_converter", True)
  quantized_input_stats = kwargs.get("quantized_input_stats", None)
  debug_info = kwargs.get("debug_info", None)

  for idx, input_tensor in enumerate(input_tensors):
    input_array = model_flags.input_arrays.add()
    if saved_model_dir:
      input_array.name = input_tensor.name
    else:
      input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = convert_tensor_tf_type_to_tflite_type(
        input_tensor.dtype, usage="input type of the TensorFlow model")

    if _is_quantized_input_stats_required(conversion_flags):
      if quantized_input_stats:
        input_array.mean_value, input_array.std_value = (
            quantized_input_stats[idx])
      else:
        # We should ideally raise an error here, but we don't as it would break
        # several models/projects that depend on this workflow.
        warnings.warn("Statistics for quantized inputs were expected, but not "
                      "specified; continuing anyway.")

    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]

    if shape.rank is not None:
      # Create shapes with -1 for unknown dimensions.
      dims = []
      for dim in shape:
        if (dim is None or
            (isinstance(dim, tensor_shape.Dimension) and dim.value is None)):
          dims.append(-1)
        else:
          dims.append(int(dim))
      input_array.shape.dims.extend(dims)
      input_array.shape.unknown_rank = False
    else:
      input_array.shape.unknown_rank = True

  for output_tensor in output_tensors:
    if saved_model_dir:
      model_flags.output_arrays.append(output_tensor.name)
    else:
      model_flags.output_arrays.append(util.get_tensor_name(output_tensor))

  data = convert(
      model_flags.SerializeToString(),
      conversion_flags.SerializeToString(),
      input_data.SerializeToString(),
      debug_info_str=debug_info.SerializeToString() if debug_info else None,
      enable_mlir_converter=enable_mlir_converter)
  return data
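A minimal usage sketch for convert_graphdef; the frozen graph below is an illustrative identity graph, and no extra kwargs are passed so the defaults from build_model_flags/build_conversion_flags apply:

import tensorflow as tf

with tf.Graph().as_default() as g:
    inp = tf.compat.v1.placeholder(shape=[1, 4], dtype=tf.float32, name="input")
    out = tf.identity(inp, name="output")

# The graph above has no variables, so it is already "frozen".
tflite_bytes = convert_graphdef(g.as_graph_def(), [inp], [out])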
Code example #8
File: converter.py Project: honglh/compute-engine
def convert_keras_model(
    model: tf.keras.Model,
    *,  # Require remaining arguments to be keyword-only.
    experimental_default_int8_range: Optional[Tuple[float, float]] = None,
    experimental_enable_bitpacked_activations: bool = False,
) -> bytes:
    """Converts a Keras model to TFLite flatbuffer.

    !!! example
        ```python
        tflite_model = convert_keras_model(model)
        with open("/tmp/my_model.tflite", "wb") as f:
            f.write(tflite_model)
        ```

    # Arguments
        model: The model to convert.
        experimental_default_int8_range: Tuple of floats representing `(min, max)`
            range values for all arrays without a specified range. Intended for
            experimenting with quantization via "dummy quantization". (default None)
        experimental_enable_bitpacked_activations: Enable an experimental
            converter optimisation that attempts to reduce intermediate
            activation memory usage by bitpacking the activation tensor between
            consecutive binary convolutions where possible.

    # Returns
        The converted data in serialized format.
    """
    if not isinstance(model, tf.keras.Model):
        raise ValueError(
            f"Expected `model` argument to be a `tf.keras.Model` instance, got `{model}`."
        )
    if not tf.executing_eagerly():
        raise RuntimeError(
            "Graph mode is not supported. Please enable eager execution using "
            "tf.enable_eager_execution() when using TensorFlow 1.x")
    if experimental_default_int8_range:
        warnings.warn(
            "Using `experimental_default_int8_range` as fallback quantization stats. "
            "This should only be used for latency tests.")
    func = concrete_function_from_keras_model(model)
    if version.parse(tf.__version__) >= version.parse("1.15"):
        frozen_func = convert_variables_to_constants_v2(
            func, lower_control_flow=False)
    else:
        frozen_func = convert_variables_to_constants_v2(func)
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = frozen_func.graph.as_graph_def()
    should_quantize = (_contains_training_quant_op(graph_def)
                       or experimental_default_int8_range is not None)

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
        # Note that shape_list might be empty for scalar shapes.
        shape_list = tensor.shape.as_list()
        if None in shape_list[1:]:
            raise ValueError(
                "None is only supported in the 1st dimension. Tensor '{0}' has "
                "invalid shape '{1}'.".format(get_tensor_name(tensor),
                                              shape_list))
        elif shape_list and shape_list[0] is None:
            # Set the batch size to 1 if undefined.
            shape = tensor.shape.as_list()
            shape[0] = 1
            tensor.set_shape(shape)

    return convert_graphdef_to_tflite_flatbuffer(
        graph_def.SerializeToString(),
        [get_tensor_name(tensor) for tensor in input_tensors],
        [
            DataType.Name(tensor.dtype.as_datatype_enum)
            for tensor in input_tensors
        ],
        [tensor.shape.as_list() for tensor in input_tensors],
        [get_tensor_name(tensor) for tensor in output_tensors],
        should_quantize,
        experimental_default_int8_range,
        experimental_enable_bitpacked_activations,
    )
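A hedged sketch of the "dummy quantization" path above: passing experimental_default_int8_range forces should_quantize to True and supplies fallback (min, max) stats for every array without a specified range, which the warning notes is intended only for latency experiments. The model and the range values are illustrative:

# model is a hypothetical float Keras model; (-3.0, 3.0) is a made-up range.
tflite_model = convert_keras_model(
    model,
    experimental_default_int8_range=(-3.0, 3.0),
)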
Code example #9
File: convert.py Project: aritratony/tensorflow
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`.  (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only needed if `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy).
      (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet
      options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist
      or are unused in the final graph. (default False)

  Returns:
    model_flags, toco_flags: two protocol buffers describing the conversion
    process.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = util.convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  toco.post_training_quantize = post_training_quantize
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if target_ops:
    if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
    elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
      toco.force_select_tf_ops = True

  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = util.convert_dtype_to_tflite_type(
        input_tensor.dtype)

    if toco.inference_input_type == _types_pb2.QUANTIZED_UINT8:
      if not quantized_input_stats:
        raise ValueError("std_dev and mean must be defined when "
                         "inference_input_type is QUANTIZED_UINT8.")
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]
    input_array.shape.dims.extend(map(int, shape))

  for output_tensor in output_tensors:
    model.output_arrays.append(util.get_tensor_name(output_tensor))

  model.allow_nonexistent_arrays = allow_nonexistent_arrays

  return model, toco
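A hedged usage sketch of the quantized path above: with inference_input_type set to tf.uint8 (mapped to QUANTIZED_UINT8), per-input (mean, std_dev) stats must be supplied or the ValueError in the loop is raised. The graph and the stats values are illustrative:

import tensorflow as tf

with tf.Graph().as_default():
    inp = tf.compat.v1.placeholder(shape=[1, 28, 28, 1], dtype=tf.float32, name="img")
    out = tf.identity(inp, name="out")
    model_flags, toco_flags = build_toco_convert_protos(
        [inp], [out],
        inference_type=tf.uint8,
        inference_input_type=tf.uint8,
        quantized_input_stats=[(127.5, 127.5)],  # one (mean, std_dev) per input
    )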