Example #1
def saved_model_to_frozen_graphdef(
        saved_model_dir,
        output_file_model,
        output_file_flags,
        input_arrays=None,
        input_shapes=None,
        output_arrays=None,
        tag_set=None,
        signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
        batch_size=1):
    """Converts a SavedModel to a frozen graph. Writes graph to tmp directory.

  Stores frozen graph and command line flags in the tmp directory.

  Args:
    saved_model_dir: SavedModel directory to convert.
    output_file_model: Full file path to save frozen graph.
    output_file_flags: Full file path to save ModelFlags.
    input_arrays: List of input tensors to freeze graph with. Uses input arrays
      from SignatureDef when none are provided. (default None)
    input_shapes: Map of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": : [1, 16, 16, 3]}).
      Automatically determined when input shapes is None (e.g., {"foo" : None}).
      (default None)
    output_arrays: List of output tensors to freeze graph with. Uses output
      arrays from SignatureDef when none are provided. (default None)
    tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
      analyze. All tags in the tag set must be present. (default "serve")
    signature_key: Key identifying SignatureDef containing inputs and outputs.
    batch_size: Batch size for the model. Replaces the first dimension of an
      input size array if undefined. (default 1)

  Returns: None.

  Raises:
    ValueError: Unable to convert to frozen graph.
  """
    frozen_graph_def, in_tensors, out_tensors = _freeze_saved_model(
        saved_model_dir, input_arrays, input_shapes, output_arrays, tag_set,
        signature_key, batch_size)

    # Initialize model flags.
    model = model_flags_pb2.ModelFlags()

    for input_tensor in in_tensors:
        input_array = model.input_arrays.add()
        input_array.name = convert.tensor_name(input_tensor)
        input_array.shape.dims.extend(map(int, input_tensor.get_shape()))

    for output_tensor in out_tensors:
        model.output_arrays.append(convert.tensor_name(output_tensor))

    # Write model and ModelFlags to file. ModelFlags contain input array and
    # output array information that is parsed from the SignatureDef and used for
    # analysis by TOCO.
    _write_and_flush_file(output_file_model,
                          frozen_graph_def.SerializeToString())
    _write_and_flush_file(output_file_flags, model.SerializeToString())
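
A minimal usage sketch for the function above (the SavedModel path is an
illustrative assumption; in TF 1.x this helper lived in the contrib TFLite
Python package):

import os
import tempfile

tmp_dir = tempfile.mkdtemp()
saved_model_to_frozen_graphdef(
    saved_model_dir="/tmp/my_saved_model",  # assumed: an existing SavedModel
    output_file_model=os.path.join(tmp_dir, "frozen_graph.pb"),
    output_file_flags=os.path.join(tmp_dir, "model_flags.pb"),
    tag_set=set(["serve"]))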
Example #2
    def _run(self, sess, in_tensor, out_tensor, should_succeed):
        """Uses the toco binary to check conversion from GraphDef to TFLite.

        Args:
          sess: Active TensorFlow session containing the graph.
          in_tensor: TensorFlow tensor to use as input.
          out_tensor: TensorFlow tensor to use as output.
          should_succeed: Whether this is a valid conversion.
        """
        # Build all protos and extract graphdef
        graph_def = sess.graph_def
        toco_flags = toco_flags_pb2.TocoFlags()
        toco_flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
        toco_flags.output_format = toco_flags_pb2.TFLITE
        toco_flags.inference_input_type = types_pb2.FLOAT
        toco_flags.inference_type = types_pb2.FLOAT
        toco_flags.allow_custom_ops = True
        model_flags = model_flags_pb2.ModelFlags()
        input_array = model_flags.input_arrays.add()
        input_array.name = TensorName(in_tensor)
        input_array.shape.dims.extend(map(int, in_tensor.get_shape()))
        model_flags.output_arrays.append(TensorName(out_tensor))
        # Shell out to run toco (in case it crashes)
        with tempfile.NamedTemporaryFile() as fp_toco, \
               tempfile.NamedTemporaryFile() as fp_model, \
               tempfile.NamedTemporaryFile() as fp_input, \
               tempfile.NamedTemporaryFile() as fp_output:
            fp_model.write(model_flags.SerializeToString())
            fp_toco.write(toco_flags.SerializeToString())
            fp_input.write(graph_def.SerializeToString())
            fp_model.flush()
            fp_toco.flush()
            fp_input.flush()
            tflite_bin = resource_loader.get_path_to_datafile(
                "toco_from_protos")
            cmdline = " ".join([
                tflite_bin, fp_model.name, fp_toco.name, fp_input.name,
                fp_output.name
            ])
            exitcode = os.system(cmdline)
            if exitcode == 0:
                stuff = fp_output.read()
                self.assertEqual(stuff is not None, should_succeed)
            else:
                self.assertFalse(should_succeed)
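
A sketch of how a test might drive the harness above, assuming TF 1.x
session-style APIs and with _run defined as shown (the test class and method
names are illustrative):

import tensorflow as tf

class TocoFromProtosTest(tf.test.TestCase):

    def testValidConversion(self):
        with tf.Session() as sess:
            in_tensor = tf.placeholder(shape=[1, 16, 16, 3], dtype=tf.float32)
            out_tensor = in_tensor + in_tensor
            # A float add converts cleanly, so this should succeed.
            self._run(sess, in_tensor, out_tensor, should_succeed=True)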
Example #3
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              quantize_weights=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{FLOAT, QUANTIZED_UINT8}`.  (default FLOAT)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{FLOAT, QUANTIZED_UINT8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of integers representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only needed if `inference_type` is `QUANTIZED_UINT8`. (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    quantize_weights: Boolean indicating whether to store weights as quantized
      weights followed by dequantize operations. Computation is still done in
      float, but reduces model size (at the cost of accuracy and latency).
      (default False)
    dump_graphviz_dir: Full path of the folder to dump GraphViz .dot files at
      various stages of processing. Preferred over output_format=GRAPHVIZ_DOT
      in order to keep the requirements of the output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)

  Returns:
    model_flags, toco_flags: two protocol buffers describing the conversion
    process.

  Raises:
    ValueError: If the input tensor type is unknown.
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log).
  """
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = inference_type
  if inference_input_type:
    toco.inference_input_type = inference_input_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  toco.quantize_weights = quantize_weights
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video

  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    if inference_type == lite_constants.QUANTIZED_UINT8:
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    input_array.name = tensor_name(input_tensor)
    if input_shapes is None:
      shape = input_tensor.get_shape()
    else:
      shape = input_shapes[idx]
    input_array.shape.dims.extend(map(int, shape))

  for output_tensor in output_tensors:
    model.output_arrays.append(tensor_name(output_tensor))
  return model, toco
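
A usage sketch under TF 1.x graph mode; the two returned protos are then
serialized and handed to the TOCO binary, as in the test harness of
Example #2:

import tensorflow as tf

in_tensor = tf.placeholder(shape=[1, 16, 16, 3], dtype=tf.float32)
out_tensor = in_tensor + in_tensor
model_flags, toco_flags = build_toco_convert_protos(
    input_tensors=[in_tensor], output_tensors=[out_tensor])
model_str = model_flags.SerializeToString()
toco_str = toco_flags.SerializeToString()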
Example #4
def toco_convert(input_data,
                 input_tensors,
                 output_tensors,
                 output_filename,
                 inference_type=FLOAT,
                 input_format=TENSORFLOW_GRAPHDEF,
                 output_format=TFLITE,
                 quantized_input_stats=None,
                 drop_control_dependency=True):
    """Convert a model using TOCO from `input_format` to `output_format`.

    Typically this is to convert from TensorFlow GraphDef to TFLite, in which
    case the default `input_format` and `output_format` are sufficient.

    Args:
    input_data: Input data (i.e. often `sess.graph_def`).
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Currently must be `{FLOAT, QUANTIZED_UINT8}`.
    input_format: Type of data to read (currently must be TENSORFLOW_GRAPHDEF).
    output_format: Type of data to write (currently must be TFLITE or
      GRAPHVIZ_DOT)
    quantized_input_stats: For each member of input_tensors the mean and
      std deviation of training data. Only needed if `inference_type` is
      `QUANTIZED_UINT8`.
    drop_control_dependency: Drops control dependencies silently. This is due
      to tf lite not supporting control dependencies.

    Returns:
    The converted data. For example if tflite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

    Raises:
    ValueError: If the input tensor type is unknown
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
    """
    toco = _toco_flags_pb2.TocoFlags()
    toco.input_format = input_format
    toco.output_format = output_format
    toco.drop_control_dependency = drop_control_dependency
    model = _model_flags_pb2.ModelFlags()
    toco.inference_type = inference_type
    for idx, input_tensor in enumerate(input_tensors):
        if input_tensor.dtype == _dtypes.float32:
            tflite_input_type = FLOAT
        elif input_tensor.dtype == _dtypes.int32:
            tflite_input_type = INT32
        elif input_tensor.dtype == _dtypes.int64:
            tflite_input_type = INT64
        else:
            raise ValueError("Tensors %s not known type %r" %
                             (input_tensor.name, input_tensor.dtype))

        input_array = model.input_arrays.add()

        if inference_type == QUANTIZED_UINT8:
            if tflite_input_type == FLOAT:
                tflite_input_type = QUANTIZED_UINT8
            input_array.mean, input_array.std = quantized_input_stats[idx]

        input_array.name = _tensor_name(input_tensor)
        input_array.shape.dims.extend(map(int, input_tensor.get_shape()))
        toco.inference_input_type = tflite_input_type

    for output_tensor in output_tensors:
        model.output_arrays.append(_tensor_name(output_tensor))

    success = toco_convert_protos(model.SerializeToString(),
                                  toco.SerializeToString(),
                                  input_data.SerializeToString(),
                                  output_filename)
    return success
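
A float-only usage sketch for this variant (TF 1.x). Note that the converted
flatbuffer is written to `output_filename` and only a success flag comes back:

import tensorflow as tf

with tf.Session() as sess:
    in_tensor = tf.placeholder(shape=[1, 16, 16, 3], dtype=tf.float32)
    out_tensor = in_tensor + in_tensor
    success = toco_convert(sess.graph_def, [in_tensor], [out_tensor],
                           "/tmp/model.tflite")  # illustrative output path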
Example #5
def toco_convert(input_data,
                 input_tensors,
                 output_tensors,
                 inference_type=lite_constants.FLOAT,
                 inference_input_type=None,
                 input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                 output_format=lite_constants.TFLITE,
                 quantized_input_stats=None,
                 default_ranges_stats=None,
                 drop_control_dependency=True,
                 reorder_across_fake_quant=False,
                 allow_custom_ops=False,
                 change_concat_input_ranges=False):
    """Convert a model using TOCO from `input_format` to `output_format`.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_data: Input data (i.e. often `sess.graph_def`).
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of arrays in the output file. Currently
      must be `{FLOAT, QUANTIZED_UINT8}`.  (default FLOAT)
    inference_input_type: Target data type of input arrays. Allows for a
      different type for input arrays in the case of quantization. Currently
      must be `{FLOAT, QUANTIZED_UINT8}`. (default `inference_type`)
    input_format: Type of data to read Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: Dict of strings representing input tensor names
      mapped to tuple of integers representing the mean and standard deviation
      of the training data (e.g., {"foo" : (0., 1.)}). Only need if
      `inference_type` is `QUANTIZED_UINT8`. (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    ValueError: If the input tensor type is unknown
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
    toco = _toco_flags_pb2.TocoFlags()
    toco.input_format = input_format
    toco.output_format = output_format
    toco.inference_type = inference_type
    if inference_input_type:
        toco.inference_input_type = inference_input_type
    toco.drop_control_dependency = drop_control_dependency
    toco.reorder_across_fake_quant = reorder_across_fake_quant
    toco.allow_custom_ops = allow_custom_ops
    if default_ranges_stats:
        toco.default_ranges_min = default_ranges_stats[0]
        toco.default_ranges_max = default_ranges_stats[1]

    model = _model_flags_pb2.ModelFlags()
    model.change_concat_input_ranges = change_concat_input_ranges
    for idx, input_tensor in enumerate(input_tensors):
        if input_tensor.dtype == _dtypes.float32:
            tflite_input_type = lite_constants.FLOAT
        elif input_tensor.dtype == _dtypes.int32:
            tflite_input_type = lite_constants.INT32
        elif input_tensor.dtype == _dtypes.int64:
            tflite_input_type = lite_constants.INT64
        elif input_tensor.dtype == _dtypes.uint8:
            tflite_input_type = lite_constants.QUANTIZED_UINT8
        # TODO(aselle): Insert strings when they are available
        else:
            raise ValueError("Tensors %s not known type %r" %
                             (input_tensor.name, input_tensor.dtype))

        input_array = model.input_arrays.add()

        if inference_type == lite_constants.QUANTIZED_UINT8:
            if tflite_input_type == lite_constants.FLOAT:
                tflite_input_type = lite_constants.QUANTIZED_UINT8
            input_array.mean_value, input_array.std_value = quantized_input_stats[
                idx]

        input_array.name = tensor_name(input_tensor)
        input_array.shape.dims.extend(map(int, input_tensor.get_shape()))

    for output_tensor in output_tensors:
        model.output_arrays.append(tensor_name(output_tensor))

    # TODO(aselle): Consider handling the case of allowing quantized
    # inputs to be converted to float (via the toco.inference_input_type field).
    data = toco_convert_protos(model.SerializeToString(),
                               toco.SerializeToString(),
                               input_data.SerializeToString())
    return data
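
Unlike Example #4, this variant returns the converted flatbuffer directly. A
quantized-inference sketch, with illustrative (mean, std) stats:

import tensorflow as tf

with tf.Session() as sess:
    in_tensor = tf.placeholder(shape=[1, 16, 16, 3], dtype=tf.float32)
    out_tensor = in_tensor + in_tensor
    tflite_model = toco_convert(
        sess.graph_def, [in_tensor], [out_tensor],
        inference_type=lite_constants.QUANTIZED_UINT8,
        quantized_input_stats=[(128., 127.)])  # illustrative (mean, std)
    with open("/tmp/quantized_model.tflite", "wb") as f:
        f.write(tflite_model)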
Example #6
    def _get_model_flags_proto_from_file(self, filename):
        """Parses a serialized ModelFlags proto from `filename`."""
        proto = _model_flags_pb2.ModelFlags()
        # The with-block closes the file on exit; no explicit close() needed.
        with gfile.Open(filename, "rb") as output_file:
            proto.ParseFromString(output_file.read())
        return proto
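
A sketch of how this helper pairs with Example #1: after
saved_model_to_frozen_graphdef writes its ModelFlags file, the proto can be
read back and inspected from inside the same test class (the path is
illustrative):

        flags_proto = self._get_model_flags_proto_from_file(
            "/tmp/model_flags.pb")
        for input_array in flags_proto.input_arrays:
            print(input_array.name, list(input_array.shape.dims))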