Example #1
def _concrete_fn_from_tf_keras_or_h5(keras_model):
    # Accept either an in-memory tf.keras model or a path to an HDF5 file.
    if not isinstance(keras_model, _tf.keras.Model):
        keras_model = _tf.keras.models.load_model(keras_model)
    input_signature = _saving_utils.model_input_signature(
        keras_model, keep_original_batch_size=True)
    fn = _saving_utils.trace_model_call(keras_model, input_signature)
    return [fn.get_concrete_function()]
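
A minimal usage sketch (assumptions: `_tf` and `_saving_utils` are module-level aliases, e.g. `import tensorflow as _tf` and, on a TF 2.x build where the internal module is importable, `from tensorflow.python.keras.saving import saving_utils as _saving_utils`):

import tensorflow as _tf
from tensorflow.python.keras.saving import saving_utils as _saving_utils

# Works with an in-memory model; an HDF5 path would be loaded first.
model = _tf.keras.Sequential([_tf.keras.layers.Dense(4, input_shape=(8,))])
[concrete_fn] = _concrete_fn_from_tf_keras_or_h5(model)
print(concrete_fn.structured_input_signature)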
Example #2
  def _generate_input_signature(self, layer):
    """Inspects layer object and returns the inferred input signature.

    Args:
      layer: Layer object.

    Returns:
      List of possibly nested TensorSpecs of the layer call function inputs.
      The list does not contain the `training` argument.
    """
    if (isinstance(layer.call, def_function.Function) and
        layer.call.input_signature is not None):
      return layer.call.input_signature
    else:
      if isinstance(layer, training_lib.Model):
        return saving_utils.model_input_signature(layer)
      elif layer.input_spec is not None:

        def to_tensor_spec_or_none(x):
          spec = input_spec.to_tensor_spec(x, layer.dtype)
          # If the shape is too general (e.g. multiple dimensions are allowed),
          # return None so that separate functions can be generated for each
          # inferred input signature.
          # TODO(b/134962016): currently partial signatures are not supported.
          if spec.shape == tensor_shape.TensorShape(None):
            return None
          return spec
        input_signature = [nest.map_structure(
            to_tensor_spec_or_none, layer.input_spec)]

        return input_signature
      else:
        return None
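
For intuition, a short sketch of what the `training_lib.Model` branch returns (assuming a TF 2.x build where the internal `saving_utils` module is importable; the result is one possibly nested TensorSpec per model input, with the batch dimension set to None by default):

import tensorflow as tf
from tensorflow.python.keras.saving import saving_utils

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
print(saving_utils.model_input_signature(model))
# e.g. [TensorSpec(shape=(None, 3), dtype=tf.float32, name='dense_input')]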
Example #3
def get_tf_keras_io_names(model):
    """
    Utility function to get tf.keras inputs/outputs names from a tf.keras model.

    Parameter
    ---------
    model: tf.keras.Model
    """
    input_names, output_names = [], []
    try:
        # The order of outputs in conc_func.structured_outputs is the same order
        # that Keras predicts in, which can be different from model.outputs
        input_signature = _saving_utils.model_input_signature(
            model, keep_original_batch_size=True)
        fn = _saving_utils.trace_model_call(model, input_signature)
        conc_func = fn.get_concrete_function()
        for key in conc_func.structured_outputs:
            output_names.append(
                conc_func.structured_outputs[key].name.split(":")[0])
    except Exception:
        for o in model.outputs:
            output_names.append(o.name.split(":")[0].split("/")[-1])
    for i in model.inputs:
        input_names.append(i.name.split(":")[0])
    return input_names, output_names
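
A hypothetical usage sketch (assuming `_saving_utils` is the same internal-module alias used in the earlier examples):

import tensorflow as tf

inp = tf.keras.Input(shape=(3,), name="feat")
out = tf.keras.layers.Dense(1, name="score")(inp)
model = tf.keras.Model(inp, out)
input_names, output_names = get_tf_keras_io_names(model)
print(input_names, output_names)  # names recovered from the traced function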
Example #4
def from_keras_model(model: keras.Model) -> tf.lite.TFLiteConverter:
    """Creates a TFLiteConverter object from a Keras model.

    Args:
      model: tf.keras.Model

    Returns:
      TFLiteConverter object.
    """
    input_signature = None
    # If the model's call is not a `tf.function`, we first need to get its
    # input signature from the `model_input_signature` method. We can't call
    # `trace_model_call` directly because then the batch dimension would be
    # set to None.
    # Once we have better support for dynamic shapes, we can remove this.
    if not isinstance(model.call, _def_function.Function):
        # Passing `keep_original_batch_size=True` ensures that the input
        # signature includes the batch dimension specified by the user.
        input_signature = _saving_utils.model_input_signature(
            model, keep_original_batch_size=True)

    func = _saving_utils.trace_model_call(model, input_signature)
    # Monkey-patch the function's private tracing hook with the module-level
    # `_defun_with_scope` helper (defined elsewhere in the source module).
    func._defun_with_scope = lambda s: _defun_with_scope(func, s)
    concrete_func = func.get_concrete_function()
    return tf.lite.TFLiteConverter([concrete_func])
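
Hypothetical usage; the aliases `keras`, `_def_function`, `_saving_utils`, and the helper `_defun_with_scope` are assumed to come from the snippet's own module. Note that the public API offers the equivalent entry point `tf.lite.TFLiteConverter.from_keras_model(model)`.

converter = from_keras_model(model)  # model: a built tf.keras.Model
tflite_bytes = converter.convert()
with open("model.tflite", "wb") as f:
    f.write(tflite_bytes)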
Example #5
    def save_model_if_path_exists(self, path, save_input_signature=False):
        if not path:
            return

        if hvd.size() > 1:
            raise ValueError(
                'SavedModel conversion not supported in HybridParallel mode')

        if save_input_signature:
            input_sig = model_input_signature(self,
                                              keep_original_batch_size=True)
            call_graph = tf.function(self)
            signatures = call_graph.get_concrete_function(input_sig[0])
        else:
            signatures = None

        variable_policy = (
            tf.saved_model.experimental.VariablePolicy.SAVE_VARIABLE_DEVICES)
        options = tf.saved_model.SaveOptions(
            experimental_variable_policy=variable_policy)

        tf.keras.models.save_model(model=self,
                                   filepath=path,
                                   overwrite=True,
                                   signatures=signatures,
                                   options=options)
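
A sketch of how this method might be invoked (assumptions: `self` belongs to a tf.keras.Model subclass that mixes in this method, and `hvd` is Horovod, e.g. `import horovod.tensorflow as hvd`):

model.save_model_if_path_exists("/tmp/exported_model",
                                save_input_signature=True)
loaded = tf.saved_model.load("/tmp/exported_model")
print(list(loaded.signatures))  # e.g. ['serving_default']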
Example #6
def concrete_function_from_keras_model(model):
    input_signature = None
    if version.parse(tf.__version__) >= version.parse("2.1"):
        # If the model's call is not a `tf.function`, we first need to get its
        # input signature from the `model_input_signature` method. We can't
        # call `trace_model_call` directly because then the batch dimension
        # would be set to None.
        # Once we have better support for dynamic shapes, we can remove this.
        if not isinstance(model.call, def_function.Function):
            # Passing `keep_original_batch_size=True` ensures that the input
            # signature includes the batch dimension specified by the user.
            input_signature = saving_utils.model_input_signature(
                model, keep_original_batch_size=True)

    func = saving_utils.trace_model_call(model, input_signature)
    return func.get_concrete_function()
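
The snippet presupposes a few imports; a minimal sketch of them plus a call (assumptions: `version` is `packaging.version`, while `def_function` and `saving_utils` are TensorFlow's internal modules):

import tensorflow as tf
from packaging import version
from tensorflow.python.eager import def_function
from tensorflow.python.keras.saving import saving_utils

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
concrete = concrete_function_from_keras_model(model)
print(concrete.structured_input_signature)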
Example #7
def constantize(fname):
    model = models.load_model(fname)

    input_signature = None
    # If the model's call is not a `tf.function`, the model may specify a
    # concrete batch size, so preserve it in the input signature.
    if not isinstance(model.call, def_function.Function):
        input_signature = saving_utils.model_input_signature(
            model, keep_original_batch_size=True)

    func = saving_utils.trace_model_call(model, input_signature)
    concrete_func = func.get_concrete_function()
    _, graph_def = convert.convert_variables_to_constants_v2_as_graph(
        concrete_func, lower_control_flow=False)

    return graph_def
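
Usage sketch (assumptions: `models` is `tf.keras.models`, `def_function` and `saving_utils` are the internal modules used above, and `convert` is e.g. `from tensorflow.python.framework import convert_to_constants as convert`):

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.save("tiny.h5")
graph_def = constantize("tiny.h5")
print(len(graph_def.node))  # op count of the frozen inference graph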
Example #8
    def save_model_if_path_exists(self, path, save_input_signature=False):
        if not path:
            return

        if hvd.size() > 1:
            raise ValueError('SavedModel conversion not supported in HybridParallel mode')

        if save_input_signature:
            input_sig = model_input_signature(self, keep_original_batch_size=True)
            call_graph = tf.function(self)
            signatures = call_graph.get_concrete_function(input_sig[0])
        else:
            signatures = None

        tf.keras.models.save_model(
            model=self,
            filepath=path,
            overwrite=True,
            signatures=signatures)
Example #9
def _convert_tf_saved_model(output_dir,
                            saved_model_dir=None,
                            keras_model=None,
                            signature_def='serving_default',
                            saved_model_tags='serve',
                            quantization_dtype_map=None,
                            skip_op_check=False,
                            strip_debug_ops=False,
                            use_structured_outputs_names=False,
                            weight_shard_size_bytes=1024 * 1024 * 4,
                            control_flow_v2=False,
                            experiments=False,
                            metadata=None,
                            frozen_graph_dir=None):
    """Take a SavedModel or KerasModel and convert to Tensorflow.js graph model.

  Args:
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    saved_model_dir: string The saved model directory.
    keras_model: An in-memory Keras model object.
    signature_def: string Name of the SignatureDef to load. Defaults to
      'serving_default'.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    use_structured_outputs_names: Bool whether output of graph model will follow
      the structured_outputs format.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
    frozen_graph_dir: The directory to keep the intermediate frozen graph of
      model.
  """
    if signature_def is None:
        signature_def = 'serving_default'

    if not tf.io.gfile.exists(output_dir):
        tf.io.gfile.makedirs(output_dir)
    output_graph = os.path.join(output_dir,
                                common.ARTIFACT_MODEL_JSON_FILE_NAME)

    saved_model_tags_list = None
    if saved_model_tags:
        saved_model_tags_list = saved_model_tags.split(',')

    model = None
    concrete_func = None
    saved_model_signature = None
    if saved_model_dir:
        saved_model_signature = _find_signature(saved_model_dir,
                                                saved_model_tags, signature_def)
        model = _load_model(saved_model_dir, saved_model_tags_list)
        _check_signature_in_model(model, signature_def)
        concrete_func = model.signatures[signature_def]
    elif keras_model:
        model = keras_model
        input_signature = None
        # If the model's call is not a `tf.function`, we first need to get its
        # input signature from the `model_input_signature` method. We can't
        # call `trace_model_call` directly because then the batch dimension
        # would be set to None.
        if not isinstance(model.call, def_function.Function):
            # Passing `keep_original_batch_size=True` ensures that the input
            # signature includes the batch dimension specified by the user.
            input_signature = model_input_signature(
                model, keep_original_batch_size=True)
        func = trace_model_call(model, input_signature)
        concrete_func = func.get_concrete_function()
    else:
        raise Exception(
            'Provide either a saved model or keras model to convert.')

    output_node_names = []
    for output_tensor in concrete_func.outputs:
        output_node_names.append(output_tensor.name.split(':')[0])

    num_outputs = len(output_node_names)
    structured_outputs = concrete_func.structured_outputs
    if use_structured_outputs_names and structured_outputs is not None:
        if not isinstance(structured_outputs, dict):
            raise Exception('Converter only supports dict structured_outputs.')

        # As per tensorflow/python/util/nest.py: "If `structure` is or contains a
        # dict instance, the keys will be sorted to pack the flat sequence
        # in deterministic order."
        sorted_keys = sorted(structured_outputs.keys())

        # Check if structure is a simple dictionary.
        # We don't support anything more complex due to the GraphModel.predict
        # function return type in typescript.
        test_sequence = list(range(num_outputs))
        actual_structure = tf.nest.pack_sequence_as(structured_outputs,
                                                    test_sequence, True)
        expected_structure = dict(zip(sorted_keys, test_sequence))
        if actual_structure != expected_structure:
            raise Exception(
                'Converter only supports structured_outputs of form '
                '{"key1": value1, "key2": value2, ...}')

        metadata = metadata or {}
        metadata[common.STRUCTURED_OUTPUTS_KEYS_KEY] = sorted_keys

    # TensorFlow doesn't encode the saved model version in the graph in a
    # reliable way. Try to freeze the graph using V2 utils. If that fails, freeze
    # the graph using V1 utils.
    frozen_initializer_graph = None
    try:
        frozen_graph = _freeze_saved_model_v2(concrete_func, control_flow_v2)
    except BaseException:
        if saved_model_dir:
            (frozen_graph, frozen_initializer_graph) = _freeze_saved_model_v1(
                saved_model_dir, saved_model_tags_list, output_node_names)
        else:
            print('Cannot freeze saved model v1.')
            return

    if frozen_graph_dir:
        output_graph = os.path.join(frozen_graph_dir,
                                    common.ARTIFACT_MODEL_JSON_FILE_NAME)
        frozen_file = output_graph + '.frozen'
        with tf.compat.v1.gfile.GFile(frozen_file, 'wb') as f:
            f.write(frozen_graph.as_graph_def().SerializeToString())

    inputs = [x for x in concrete_func.inputs if x.dtype != 'resource']
    signature = _build_signature_def(frozen_graph, inputs,
                                     concrete_func.outputs,
                                     saved_model_signature)

    define_transform_graph_func()

    tf_version = None
    try:
        tf_version = model.tensorflow_version
    except:  # pylint: disable=W0702
        # Keras models do not carry a `tensorflow_version` attribute; fall back
        # to the installed TensorFlow version.
        tf_version = tf.__version__

    optimize_graph(frozen_graph,
                   signature,
                   output_graph,
                   tf_version,
                   quantization_dtype_map=quantization_dtype_map,
                   skip_op_check=skip_op_check,
                   strip_debug_ops=strip_debug_ops,
                   weight_shard_size_bytes=weight_shard_size_bytes,
                   experiments=experiments,
                   initializer_graph=frozen_initializer_graph,
                   metadata=metadata)
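
A hypothetical invocation; the private helpers (`_find_signature`, `_load_model`, `_freeze_saved_model_v2`, `optimize_graph`, and so on) are assumed to live in the surrounding tfjs-converter module:

# Convert an in-memory Keras model:
_convert_tf_saved_model('/tmp/tfjs_out', keras_model=model)
# Or convert a SavedModel directory on disk:
_convert_tf_saved_model('/tmp/tfjs_out', saved_model_dir='/tmp/saved_model')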
Example #10
def _convert_tf_saved_model(output_dir,
                            saved_model_dir=None,
                            keras_model=None,
                            signature_def='serving_default',
                            saved_model_tags='serve',
                            quantization_dtype_map=None,
                            skip_op_check=False,
                            strip_debug_ops=False,
                            weight_shard_size_bytes=1024 * 1024 * 4,
                            control_flow_v2=False,
                            experiments=False,
                            metadata=None):
    """Take a SavedModel or KerasModel and convert to Tensorflow.js graph model.

  Args:
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    saved_model_dir: string The saved model directory.
    keras_model: An in-memory Keras model object.
    signature_def: string Name of the SignatureDef to load. Defaults to
      'serving_default'.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
    if signature_def is None:
        signature_def = 'serving_default'

    if not tf.io.gfile.exists(output_dir):
        tf.io.gfile.makedirs(output_dir)
    output_graph = os.path.join(output_dir,
                                common.ARTIFACT_MODEL_JSON_FILE_NAME)

    saved_model_tags_list = None
    if saved_model_tags:
        saved_model_tags_list = saved_model_tags.split(',')

    model = None
    concrete_func = None
    saved_model_signature = None
    if saved_model_dir:
        saved_model_signature = _find_signature(saved_model_dir,
                                                saved_model_tags, signature_def)
        model = _load_model(saved_model_dir, saved_model_tags_list)
        _check_signature_in_model(model, signature_def)
        concrete_func = model.signatures[signature_def]
    elif keras_model:
        model = keras_model
        input_signature = None
        # If the model's call is not a `tf.function`, we first need to get its
        # input signature from the `model_input_signature` method. We can't
        # call `trace_model_call` directly because then the batch dimension
        # would be set to None.
        if not isinstance(model.call, def_function.Function):
            # Passing `keep_original_batch_size=True` ensures that the input
            # signature includes the batch dimension specified by the user.
            input_signature = model_input_signature(
                model, keep_original_batch_size=True)
        func = trace_model_call(model, input_signature)
        concrete_func = func.get_concrete_function()
    else:
        raise Exception(
            'Provide either a saved model or keras model to convert.')

    output_node_names = []
    for output_tensor in concrete_func.outputs:
        output_node_names.append(output_tensor.name.split(':')[0])

    # TensorFlow doesn't encode the saved model version in the graph in a
    # reliable way. Try to freeze the graph using V2 utils. If that fails, freeze
    # the graph using V1 utils.
    frozen_initializer_graph = None
    try:
        frozen_graph = _freeze_saved_model_v2(concrete_func, control_flow_v2)
    except BaseException:
        if saved_model_dir:
            (frozen_graph, frozen_initializer_graph) = _freeze_saved_model_v1(
                saved_model_dir, saved_model_tags_list, output_node_names)
        else:
            print('Cannot freeze saved model v1.')
            return

    inputs = [x for x in concrete_func.inputs if x.dtype != 'resource']
    signature = _build_signature_def(frozen_graph, inputs,
                                     concrete_func.outputs,
                                     saved_model_signature)

    # Check whether TransformGraph can be imported; this package is available
    # internally (g3) but not in the OSS version of TensorFlow.
    transform_graph_available = True
    try:
        from tensorflow.tools.graph_transforms import TransformGraph  # pylint: disable=C0415
    except:  # pylint: disable=W0702
        transform_graph_available = False

    # When TransformGraph is available, define a helper that strips unused
    # nodes from the graph.
    if transform_graph_available:

        def _strip_unused_nodes(frozen_graph, concrete_func,
                                output_node_names):
            # Find the names of the input nodes needed to extract the minimal
            # inference graph. This is particularly useful when the concrete
            # function contains nodes that do not contribute to the inference
            # computation defined by the input/output pair. It also eliminates
            # unsupported-op errors caused by nodes outside of the minimal
            # inference graph.
            input_node_names = []
            input_tensors = {}
            for input_tensor in concrete_func.inputs:
                if input_tensor.dtype != 'resource':
                    op_name = input_tensor.name.split(':')[0]
                    # The graph freezing may turn the original inputs into constants, or
                    # remove them from the graph, so we need to ignore those.
                    try:
                        op = frozen_graph.get_operation_by_name(op_name)
                        if op.type != 'Const':
                            input_node_names.append(op_name)
                            input_tensors[op_name] = input_tensor
                    except KeyError:
                        # The original input was removed when the graph was frozen.
                        continue

            graph_transformations = ['strip_unused_nodes']
            stripped_graph_def = TransformGraph(frozen_graph.as_graph_def(),
                                                input_node_names,
                                                output_node_names,
                                                graph_transformations)

            # The transform graph library does not support input nodes with
            # dynamic shapes, so manually update each node's dtype and shape
            # from the corresponding input tensor.
            for node in stripped_graph_def.node:
                if node.name in input_tensors:
                    if node.attr['shape'] and node.attr['shape'].shape:
                        node.attr['shape'].shape.CopyFrom(
                            input_tensors[node.name].shape.as_proto())
                    if node.attr['dtype'] and node.attr['dtype'].type:
                        node.attr['dtype'].type = input_tensors[
                            node.name].dtype.as_datatype_enum

            with tf.Graph().as_default() as stripped_graph:
                tf.import_graph_def(stripped_graph_def, name='')
                return stripped_graph

        frozen_graph = _strip_unused_nodes(frozen_graph, concrete_func,
                                           output_node_names)

    version = None
    try:
        version = model.tensorflow_version
    except:  # pylint: disable=W0702
        # Keras models do not carry a `tensorflow_version` attribute; fall back
        # to the installed TensorFlow version.
        version = tf.__version__

    optimize_graph(frozen_graph,
                   signature,
                   output_graph,
                   version,
                   quantization_dtype_map=quantization_dtype_map,
                   skip_op_check=skip_op_check,
                   strip_debug_ops=strip_debug_ops,
                   weight_shard_size_bytes=weight_shard_size_bytes,
                   experiments=experiments,
                   initializer_graph=frozen_initializer_graph,
                   metadata=metadata)
Example #11
def _convert_tf_saved_model(output_dir,
                            saved_model_dir=None,
                            keras_model=None,
                            signature_def='serving_default',
                            saved_model_tags='serve',
                            quantization_dtype_map=None,
                            skip_op_check=False,
                            strip_debug_ops=False,
                            weight_shard_size_bytes=1024 * 1024 * 4,
                            control_flow_v2=False,
                            experiments=False,
                            metadata=None):
  """Take a SavedModel or KerasModel and convert to Tensorflow.js graph model.

  Args:
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    saved_model_dir: string The saved model directory.
    keras_model: An in-memory Keras model object.
    signature_def: string Name of the SignatureDef to load. Defaults to
      'serving_default'.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
  if signature_def is None:
    signature_def = 'serving_default'

  if not tf.io.gfile.exists(output_dir):
    tf.io.gfile.makedirs(output_dir)
  output_graph = os.path.join(
      output_dir, common.ARTIFACT_MODEL_JSON_FILE_NAME)

  saved_model_tags_list = None
  if saved_model_tags:
    saved_model_tags_list = saved_model_tags.split(',')

  model = None
  concrete_func = None
  saved_model_signature = None
  if saved_model_dir:
    saved_model_signature = _find_signature(saved_model_dir, saved_model_tags,
                                            signature_def)
    model = _load_model(saved_model_dir, saved_model_tags_list)
    _check_signature_in_model(model, signature_def)
    concrete_func = model.signatures[signature_def]
  elif keras_model:
    model = keras_model
    input_signature = None
    # If the model's call is not a `tf.function`, we first need to get its
    # input signature from the `model_input_signature` method. We can't call
    # `trace_model_call` directly because then the batch dimension would be
    # set to None.
    if not isinstance(model.call, def_function.Function):
      # Passing `keep_original_batch_size=True` ensures that the input
      # signature includes the batch dimension specified by the user.
      input_signature = model_input_signature(
          model, keep_original_batch_size=True)
    func = trace_model_call(model, input_signature)
    concrete_func = func.get_concrete_function()
  else:
    raise Exception('Provide either a saved model or keras model to convert.')

  output_node_names = []
  for output_tensor in concrete_func.outputs:
    output_node_names.append(output_tensor.name.split(':')[0])

  # TensorFlow doesn't encode the saved model version in the graph in a
  # reliable way. Try to freeze the graph using V2 utils. If that fails, freeze
  # the graph using V1 utils.
  frozen_initializer_graph = None
  try:
    frozen_graph = _freeze_saved_model_v2(concrete_func, control_flow_v2)
  except BaseException:
    if saved_model_dir:
      (frozen_graph,
       frozen_initializer_graph) = _freeze_saved_model_v1(saved_model_dir,
                                                          saved_model_tags_list,
                                                          output_node_names)
    else:
      print('Cannot freeze saved model v1.')
      return

  inputs = [x for x in concrete_func.inputs if x.dtype != 'resource']
  signature = _build_signature_def(
      frozen_graph, inputs, concrete_func.outputs, saved_model_signature)

  define_transform_graph_func()

  version = None
  try:
    version = model.tensorflow_version
  except: # pylint: disable=W0702
    # Keras models do not carry a `tensorflow_version` attribute; fall back
    # to the installed TensorFlow version.
    version = tf.__version__

  optimize_graph(frozen_graph, signature,
                 output_graph, version,
                 quantization_dtype_map=quantization_dtype_map,
                 skip_op_check=skip_op_check,
                 strip_debug_ops=strip_debug_ops,
                 weight_shard_size_bytes=weight_shard_size_bytes,
                 experiments=experiments,
                 initializer_graph=frozen_initializer_graph,
                 metadata=metadata)