Example #1
  def test_specify_input_signature(self):
    model = testing_utils.get_small_sequential_mlp(10, 3, None)
    inputs = array_ops.ones((8, 5))

    with self.assertRaisesRegexp(ValueError, 'input shapes have not been set'):
      saving_utils.trace_model_call(model)

    fn = saving_utils.trace_model_call(
        model, [tensor_spec.TensorSpec(shape=[None, 5], dtype=dtypes.float32)])
    signature_outputs = fn(inputs)
    expected_outputs = {model.output_names[0]: model(inputs)}
    self._assert_all_close(expected_outputs, signature_outputs)
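A minimal standalone sketch of the same pattern using public TF 2.x APIs (assuming a TF 2.x release where `saving_utils` still lives under `tensorflow.python.keras.saving`):

```python
import tensorflow as tf
from tensorflow.python.keras.saving import saving_utils

model = tf.keras.Sequential([tf.keras.layers.Dense(3, input_shape=(5,))])
# Passing an explicit TensorSpec avoids the 'input shapes have not been set'
# error raised for models whose inputs are still unknown.
fn = saving_utils.trace_model_call(
    model, [tf.TensorSpec(shape=[None, 5], dtype=tf.float32)])
# The traced tf.function returns a dict keyed by the model's output names.
print(fn(tf.ones((8, 5))))
```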
Example #2
  def test_trace_model_outputs(self):
    input_dim = 5 if testing_utils.get_model_type() == 'functional' else None
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    inputs = array_ops.ones((8, 5))

    if input_dim is None:
      with self.assertRaisesRegexp(ValueError,
                                   'input shapes have not been set'):
        saving_utils.trace_model_call(model)
      model._set_inputs(inputs)

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn(inputs)
    expected_outputs = {model.output_names[0]: model(inputs)}

    self._assert_all_close(expected_outputs, signature_outputs)
Example #3
  def test_trace_features_layer(self):
    columns = [feature_column_v2.numeric_column('x')]
    model = sequential.Sequential(
        [feature_column_v2.DenseFeatures(columns)])
    model_input = {'x': constant_op.constant([[1.]])}
    model.predict(model_input, steps=1)
    fn = saving_utils.trace_model_call(model)
    self.assertAllClose({'output_1': [[1.]]}, fn({'x': [[1.]]}))

    columns = [feature_column_v2.numeric_column('x'),
               feature_column_v2.numeric_column('y')]
    model = sequential.Sequential(
        [feature_column_v2.DenseFeatures(columns)])
    model_input = {'x': constant_op.constant([[1.]]),
                   'y': constant_op.constant([[2.]])}
    model.predict(model_input, steps=1)
    fn = saving_utils.trace_model_call(model)
    self.assertAllClose({'output_1': [[1., 2.]]},
                        fn({'x': [[1.]], 'y': [[2.]]}))
Example #4
  def from_keras_model(cls, model):
    """Creates a TFLiteConverter object from a Keras model.

    Args:
      model: tf.keras.Model

    Returns:
      TFLiteConverter object.
    """
    func = _saving_utils.trace_model_call(model)
    concrete_func = func.get_concrete_function()
    return cls([concrete_func])
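For comparison, the public entry point that this classmethod backs can be exercised directly; a minimal sketch (TF 2.x, model is illustrative):

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
# Internally this traces the model with trace_model_call and converts the
# resulting concrete function.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
```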
Example #5
  def test_trace_model_outputs_after_fitting(self):
    input_dim = 5 if testing_utils.get_model_type() == 'functional' else None
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    model.compile(optimizer='sgd', loss='mse')
    model.fit(x=np.random.random((8, 5)),
              y=np.random.random((8, 3)), epochs=2)

    inputs = array_ops.ones((8, 5))

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn(inputs)
    expected_outputs = {model.output_names[0]: model(inputs)}

    self._assert_all_close(expected_outputs, signature_outputs)
Example #6
  def test_trace_multi_io_model_outputs(self):
    input_dim = 5
    num_classes = 3
    num_classes_b = 4
    input_a = keras.layers.Input(shape=(input_dim,), name='input_a')
    input_b = keras.layers.Input(shape=(input_dim,), name='input_b')

    dense = keras.layers.Dense(num_classes, name='dense')
    dense2 = keras.layers.Dense(num_classes_b, name='dense2')
    dropout = keras.layers.Dropout(0.5, name='dropout')
    branch_a = [input_a, dense]
    branch_b = [input_b, dense, dense2, dropout]

    model = testing_utils.get_multi_io_model(branch_a, branch_b)

    input_a_np = np.random.random((10, input_dim)).astype(np.float32)
    input_b_np = np.random.random((10, input_dim)).astype(np.float32)

    if testing_utils.get_model_type() == 'subclass':
      with self.assertRaisesRegexp(ValueError,
                                   'input shapes have not been set'):
        saving_utils.trace_model_call(model)

    model.compile(optimizer='sgd', loss='mse')
    model.fit(x=[np.random.random((8, input_dim)).astype(np.float32),
                 np.random.random((8, input_dim)).astype(np.float32)],
              y=[np.random.random((8, num_classes)).astype(np.float32),
                 np.random.random((8, num_classes_b)).astype(np.float32)],
              epochs=2)

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn([input_a_np, input_b_np])
    outputs = model([input_a_np, input_b_np])
    expected_outputs = {model.output_names[0]: outputs[0],
                        model.output_names[1]: outputs[1]}

    self._assert_all_close(expected_outputs, signature_outputs)
Example #7
import tensorflow as tf
from packaging import version  # assumed source of `version.parse` used below
from tensorflow.python.eager import def_function
from tensorflow.python.keras.saving import saving_utils


def concrete_function_from_keras_model(model):
    input_signature = None
    if version.parse(tf.__version__) >= version.parse("2.1"):
        # If the model's call is not a `tf.function`, then we need to first get
        # its input signature from the `model_input_signature` method. We can't
        # directly call `trace_model_call` because otherwise the batch dimension
        # is set to None.
        # Once we have better support for dynamic shapes, we can remove this.
        if not isinstance(model.call, def_function.Function):
            # Passing `keep_original_batch_size=True` ensures that we get an
            # input signature including the batch dimension specified by the user.
            input_signature = saving_utils.model_input_signature(
                model, keep_original_batch_size=True
            )

    func = saving_utils.trace_model_call(model, input_signature)
    return func.get_concrete_function()
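Hypothetical usage of the helper above (the model is illustrative):

```python
model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
concrete_fn = concrete_function_from_keras_model(model)
# On TF >= 2.1 the signature keeps the user-specified batch dimension, since
# it was derived via model_input_signature(keep_original_batch_size=True).
print(concrete_fn.structured_input_signature)
```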
Example #8
    def _from_tf_keras_model(keras_model):
        from tensorflow.python.keras.saving import saving_utils
        from tensorflow.python.framework.convert_to_constants import (
            convert_variables_to_constants_v2, )

        if not isinstance(keras_model, tf.keras.Model):
            keras_model = tf.keras.models.load_model(keras_model, None)

        tf.keras.backend.clear_session()
        tf.keras.backend.set_learning_phase(False)
        fn = saving_utils.trace_model_call(keras_model)
        cf = fn.get_concrete_function()
        try:
            frozen_fn = convert_variables_to_constants_v2(cf)
            return frozen_fn.graph.as_graph_def(add_shapes=True)
        except Exception as e:
            raise NotImplementedError("Unhandled tf.keras model format") from e
Example #9
def from_keras(model_path, input_names, output_names):
    """Load keras model - experimental for now."""
    from tensorflow.python import keras as _keras
    from tensorflow.python.eager import context
    from tensorflow.python.keras.saving import saving_utils as _saving_utils

    # Handles Keras when Eager mode is enabled.
    custom_objects = None
    with tf.device("/cpu:0"):
        if context.executing_eagerly():
            _keras.backend.clear_session()
            _keras.backend.set_learning_phase(False)
            keras_model = _keras.models.load_model(model_path, custom_objects)

            function = _saving_utils.trace_model_call(keras_model)
            concrete_func = function.get_concrete_function()
            # allow the caller to pass inputs and outputs if we don't want all of them
            input_names = [
                input_tensor.name for input_tensor in concrete_func.inputs
                if input_tensor.dtype != tf.dtypes.resource
            ]
            output_names = [
                output_tensor.name for output_tensor in concrete_func.outputs
                if output_tensor.dtype != tf.dtypes.resource
            ]
            frozen_graph = from_function(concrete_func, input_names,
                                         output_names)
        else:
            # Handles Keras when Eager mode is disabled.
            _keras.backend.clear_session()
            _keras.backend.set_learning_phase(False)
            keras_model = _keras.models.load_model(model_path, custom_objects)
            # allow the caller to pass inputs and outputs if we don't want all of them
            input_names = keras_model.inputs
            output_names = keras_model.outputs
            sess = _keras.backend.get_session()
            input_names = inputs_without_resource(sess, input_names)
            frozen_graph = freeze_session(sess,
                                          input_names=input_names,
                                          output_names=output_names)
            tf_reset_default_graph()
            with tf_session() as sess:
                frozen_graph = tf_optimize(input_names, output_names,
                                           frozen_graph)
            tf_reset_default_graph()
    return frozen_graph, input_names, output_names
Example #10
    def test_trace_model_outputs_after_fitting(self):
        input_dim = 5 if testing_utils.get_model_type(
        ) == 'functional' else None
        model = testing_utils.get_small_mlp(10, 3, input_dim)
        model.compile(optimizer='sgd',
                      loss='mse',
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(x=np.random.random((8, 5)),
                  y=np.random.random((8, 3)),
                  epochs=2)

        inputs = array_ops.ones((8, 5))

        fn = saving_utils.trace_model_call(model)
        signature_outputs = fn(inputs)
        expected_outputs = {model.output_names[0]: model(inputs)}

        self._assert_all_close(expected_outputs, signature_outputs)
Example #11
def extract_outputs_from_subclassing_model(model, output_dict, input_names, output_names):
    from tensorflow.python.keras.saving import saving_utils as _saving_utils
    from tensorflow.python.util import object_identity
    from ._graph_cvt import convert_variables_to_constants_v2 as _convert_to_constants

    function = _saving_utils.trace_model_call(model)
    concrete_func = function.get_concrete_function()
    output_names.extend([ts_.name for ts_ in concrete_func.outputs])
    output_dict.update(build_layer_outputs(model, concrete_func.graph, concrete_func.outputs))
    graph_def, converted_input_indices = _convert_to_constants(
        concrete_func, lower_control_flow=True)
    input_tensors = concrete_func.graph.internal_captures
    converted_inputs = object_identity.ObjectIdentitySet(
        [input_tensors[index] for index in converted_input_indices])
    input_names.extend([
        tensor.name for tensor in concrete_func.inputs if tensor not in converted_inputs])

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')

    return tf_graph
Example #12
  def test_subclassed_model_with_input_signature(self):

    class Model(keras.Model):

      def __init__(self):
        super(Model, self).__init__()
        self.dense = keras.layers.Dense(3, name='dense')

      @def_function.function(
          input_signature=[[tensor_spec.TensorSpec([None, 5], dtypes.float32),
                            tensor_spec.TensorSpec([None], dtypes.float32)]],)
      def call(self, inputs, *args):
        x, y = inputs
        return self.dense(x) + y

    model = Model()
    fn = saving_utils.trace_model_call(model)
    x = array_ops.ones((8, 5), dtype=dtypes.float32)
    y = array_ops.ones((3,), dtype=dtypes.float32)
    expected_outputs = {'output_1': model([x, y])}
    signature_outputs = fn([x, y])
    self._assert_all_close(expected_outputs, signature_outputs)
Example #13
    def test_subclassed_model_with_input_signature(self):
        class Model(keras.Model):
            def __init__(self):
                super(Model, self).__init__()
                self.dense = keras.layers.Dense(3, name='dense')

            @def_function.function(
                input_signature=[[
                    tensor_spec.TensorSpec([None, 5], dtypes.float32),
                    tensor_spec.TensorSpec([None], dtypes.float32)
                ]], )
            def call(self, inputs, *args):
                x, y = inputs
                return self.dense(x) + y

        model = Model()
        fn = saving_utils.trace_model_call(model)
        x = array_ops.ones((8, 5), dtype=dtypes.float32)
        y = array_ops.ones((3, ), dtype=dtypes.float32)
        expected_outputs = {'output_1': model([x, y])}
        signature_outputs = fn([x, y])
        self._assert_all_close(expected_outputs, signature_outputs)
Example #14
def _graph_def_from_saved_model_or_keras_model(filename):
    """
    Utility function that returns GraphDef object from the given SavedModel or HDF5 model.
    :param filename: TensorFlow SavedModel directory or Keras HDF5 model (.h5) file.
    :return: TensorFlow GraphDef object.
    """
    try:
        import tensorflow as tf
        from tensorflow.python.keras.saving import saving_utils as _saving_utils
        from tensorflow.python.framework import convert_to_constants as _convert_to_constants
        model = tf.keras.models.load_model(filename)
        tf.keras.backend.set_learning_phase(False)
        func = _saving_utils.trace_model_call(model)
        concrete_func = func.get_concrete_function()
        # concrete_func = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        frozen_func = _convert_to_constants.convert_variables_to_constants_v2(concrete_func)
        graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
    except ImportError as e:
        raise ImportError('Failed to import TensorFlow utilities. {}.'.format(e))
    except Exception as e:
        raise RuntimeError('Failed to load SavedModel or .h5 model. {}.'.format(e))
    return graph_def
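Hypothetical usage of the helper above (the .h5 path is illustrative):

```python
# Assumes a Keras model was previously saved with model.save('/tmp/model.h5').
graph_def = _graph_def_from_saved_model_or_keras_model('/tmp/model.h5')
print(len(graph_def.node))  # number of nodes in the frozen graph
```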
Example #15
def _graph_def_from_saved_model_or_keras_model(filename):
    """
    Utility function that returns GraphDef object from the given SavedModel or HDF5 model.
    :param filename: TensorFlow SavedModel directory or Keras HDF5 model (.h5) file.
    :return: TensorFlow GraphDef object.
    """
    try:
        import tensorflow as tf
        from tensorflow.python.keras.saving import saving_utils as _saving_utils
        from tensorflow.python.framework import convert_to_constants as _convert_to_constants
        if filename.endswith('.h5'):
            model = tf.keras.models.load_model(filename)
            tf.keras.backend.set_learning_phase(False)
            func = _saving_utils.trace_model_call(model)
            concrete_func = func.get_concrete_function()
        else:
            model = tf.saved_model.load(filename)
            signatures = model.signatures
            if len(signatures) == 0:
                raise ValueError(
                    'Unable to load a model with no signatures provided.')
            if len(signatures) >= 2:
                raise ValueError(
                    'Unable to load a model with multiple signatures')
            concrete_func = list(signatures.values())[0]
        frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
            concrete_func)
        graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
    except ImportError as e:
        raise ImportError(
            'Failed to import TensorFlow utilities. {}.'.format(e))
    except ValueError as e:
        raise ValueError(
            'Failed to load SavedModel or .h5 model. {}.'.format(e))
    except Exception as e:
        raise RuntimeError(
            'Failed to load SavedModel or .h5 model. {}.'.format(e))
    return graph_def
Example #16
    def from_keras_model(cls, model):
        """Creates a TFLiteConverter object from a Keras model.

    Args:
      model: tf.keras.Model

    Returns:
      TFLiteConverter object.
    """
        input_signature = None
        # If the model's call is not a `tf.function`, then we need to first get
        # its input signature from the `model_input_signature` method. We can't
        # directly call `trace_model_call` because otherwise the batch dimension
        # is set to None.
        # Once we have better support for dynamic shapes, we can remove this.
        if not isinstance(model.call, _def_function.Function):
            # Passing `keep_original_batch_size=True` ensures that we get an
            # input signature including the batch dimension specified by the user.
            input_signature = _saving_utils.model_input_signature(
                model, keep_original_batch_size=True)

        func = _saving_utils.trace_model_call(model, input_signature)
        concrete_func = func.get_concrete_function()
        return cls([concrete_func])
Example #17
def from_keras(model,
               input_signature=None,
               opset=None,
               custom_ops=None,
               custom_op_handlers=None,
               custom_rewriter=None,
               inputs_as_nchw=None,
               extra_opset=None,
               shape_override=None,
               target=None,
               large_model=False,
               output_path=None):
    """Returns a ONNX model_proto for a tf.keras model.

    Args:
        model: the tf.keras model we want to convert
        input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
        opset: the opset to be used for the ONNX model, default is the latest
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opsets, for example the opsets used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nchw to nhwc
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path

    Returns:
        An ONNX model_proto and an external_tensor_storage dict.
    """
    if LooseVersion(tf.__version__) < "2.0":
        raise NotImplementedError("from_keras requires tf-2.0 or newer")

    from tensorflow.python.keras.saving import saving_utils as _saving_utils  # pylint: disable=import-outside-toplevel

    # let tensorflow do the checking if model is a valid model
    function = _saving_utils.trace_model_call(model, input_signature)
    try:
        concrete_func = function.get_concrete_function()
    except TypeError as e:
        # Legacy keras models don't accept the training arg tf provides so we hack around it
        if "got an unexpected keyword argument 'training'" not in str(e):
            raise e
        model_call = model.call

        def wrap_call(*args, training=False, **kwargs):
            return model_call(*args, **kwargs)

        model.call = wrap_call
        function = _saving_utils.trace_model_call(model, input_signature)
        concrete_func = function.get_concrete_function()
        # Put it back
        model.call = model_call

    # These inputs will be removed during freezing (includes resources, etc.)
    graph_captures = concrete_func.graph._captures  # pylint: disable=protected-access
    captured_inputs = [
        t_name.name for t_val, t_name in graph_captures.values()
    ]
    input_names = [
        input_tensor.name for input_tensor in concrete_func.inputs
        if input_tensor.name not in captured_inputs
    ]
    output_names = [
        output_tensor.name for output_tensor in concrete_func.outputs
        if output_tensor.dtype != tf.dtypes.resource
    ]

    initialized_tables = None
    tensors_to_rename = tensor_names_from_structed(concrete_func, input_names,
                                                   output_names)
    reverse_lookup = {v: k for k, v in tensors_to_rename.items()}

    if model.output_names:
        # model.output_names is an optional field of Keras models indicating output order. It is None if unused.
        output_names = [reverse_lookup[out] for out in model.output_names]
    elif isinstance(concrete_func.structured_outputs, dict):
        # Other models specify output order using the key order of structured_outputs
        output_names = [
            reverse_lookup[out]
            for out in concrete_func.structured_outputs.keys()
        ]

    with tf.device("/cpu:0"):
        frozen_graph = tf_loader.from_function(concrete_func,
                                               input_names,
                                               output_names,
                                               large_model=large_model)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=model.name,
            continue_on_error=True,
            target=target,
            opset=opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)

        return model_proto, external_tensor_storage
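A minimal sketch of calling this entry point through an installed tf2onnx package (model, spec, and opset are illustrative):

```python
import tensorflow as tf
import tf2onnx

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
spec = (tf.TensorSpec((None, 3), tf.float32, name="input"),)
# from_keras traces the model via saving_utils.trace_model_call as shown above.
model_proto, external_tensor_storage = tf2onnx.convert.from_keras(
    model, input_signature=spec, opset=13)
```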
Example #18
def from_keras(model,
               input_signature=None,
               opset=None,
               custom_ops=None,
               custom_op_handlers=None,
               custom_rewriter=None,
               inputs_as_nchw=None,
               extra_opset=None,
               shape_override=None,
               target=None,
               large_model=False,
               output_path=None):
    """Returns a ONNX model_proto for a tf.keras model.

    Args:
        model: the tf.keras model we want to convert
        input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
        opset: the opset to be used for the ONNX model, default is the latest
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opsets, for example the opsets used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nchw to nhwc
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path

    Returns:
        An ONNX model_proto and an external_tensor_storage dict.
    """
    if LooseVersion(tf.__version__) < "2.0":
        raise NotImplementedError("from_keras requires tf-2.0 or newer")

    if not input_signature:
        raise ValueError("from_keras requires input_signature")

    from tensorflow.python.keras.saving import saving_utils as _saving_utils  # pylint: disable=import-outside-toplevel

    # let tensorflow do the checking if model is a valid model
    function = _saving_utils.trace_model_call(model, input_signature)
    concrete_func = function.get_concrete_function(*input_signature)

    input_names = [
        input_tensor.name for input_tensor in concrete_func.inputs
        if input_tensor.dtype != tf.dtypes.resource
    ]
    output_names = [
        output_tensor.name for output_tensor in concrete_func.outputs
        if output_tensor.dtype != tf.dtypes.resource
    ]

    initialized_tables = None
    tensors_to_rename = tensor_names_from_structed(concrete_func, input_names,
                                                   output_names)

    with tf.device("/cpu:0"):
        frozen_graph = tf_loader.from_function(concrete_func,
                                               input_names,
                                               output_names,
                                               large_model=large_model)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=model.name,
            continue_on_error=True,
            target=target,
            opset=opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)

        return model_proto, external_tensor_storage
Example #19
def _convert_tf_saved_model(output_dir,
                            saved_model_dir=None,
                            keras_model=None,
                            signature_def='serving_default',
                            saved_model_tags='serve',
                            quantization_dtype_map=None,
                            skip_op_check=False,
                            strip_debug_ops=False,
                            use_structured_outputs_names=False,
                            weight_shard_size_bytes=1024 * 1024 * 4,
                            control_flow_v2=False,
                            experiments=False,
                            metadata=None,
                            frozen_graph_dir=None):
    """Take a SavedModel or KerasModel and convert to Tensorflow.js graph model.

  Args:
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    saved_model_dir: string The saved model directory.
    keras_model: An in-memory Keras model object.
    signature_def: string Name of the SignatureDef to load. Defaults to
      'serving_default'.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    use_structured_outputs_names: Bool whether output of graph model will follow
      the structured_outputs format.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
    frozen_graph_dir: The directory to keep the intermediate frozen graph of
      model.
  """
    if signature_def is None:
        signature_def = 'serving_default'

    if not tf.io.gfile.exists(output_dir):
        tf.io.gfile.makedirs(output_dir)
    output_graph = os.path.join(output_dir,
                                common.ARTIFACT_MODEL_JSON_FILE_NAME)

    saved_model_tags_list = None
    if saved_model_tags:
        saved_model_tags_list = saved_model_tags.split(',')

    model = None
    concrete_func = None
    saved_model_signature = None
    if saved_model_dir:
        saved_model_signature = _find_signature(saved_model_dir,
                                                saved_model_tags, signature_def)
        model = _load_model(saved_model_dir, saved_model_tags_list)
        _check_signature_in_model(model, signature_def)
        concrete_func = model.signatures[signature_def]
    elif keras_model:
        model = keras_model
        input_signature = None
        # If the model's call is not a `tf.function`, then we need to first get
        # its input signature from the `model_input_signature` method. We can't
        # directly call `trace_model_call` because otherwise the batch dimension
        # is set to None.
        if not isinstance(model.call, def_function.Function):
            # Passing `keep_original_batch_size=True` ensures that we get an
            # input signature including the batch dimension specified by the user.
            input_signature = model_input_signature(
                model, keep_original_batch_size=True)
        func = trace_model_call(model, input_signature)
        concrete_func = func.get_concrete_function()
    else:
        raise Exception(
            'Provide either a saved model or keras model to convert.')

    output_node_names = []
    for output_tensor in concrete_func.outputs:
        output_node_names.append(output_tensor.name.split(':')[0])

    num_outputs = len(output_node_names)
    structured_outputs = concrete_func.structured_outputs
    if use_structured_outputs_names and structured_outputs is not None:
        if not isinstance(structured_outputs, dict):
            raise Exception('Converter only supports dict structured_outputs.')

        # As per tensorflow/python/util/nest.py: "If `structure` is or contains a
        # dict instance, the keys will be sorted to pack the flat sequence
        # in deterministic order."
        sorted_keys = sorted(structured_outputs.keys())

        # Check if structure is a simple dictionary.
        # We don't support anything more complex due to the GraphModel.predict
        # function return type in typescript.
        test_sequence = list(range(num_outputs))
        actual_structure = tf.nest.pack_sequence_as(structured_outputs,
                                                    test_sequence, True)
        expected_structure = dict(zip(sorted_keys, test_sequence))
        if actual_structure != expected_structure:
            raise Exception(
                'Converter only supports structured_outputs of form '
                '{"key1": value1, "key2":value2 ... })')

        metadata = metadata or {}
        metadata[common.STRUCTURED_OUTPUTS_KEYS_KEY] = sorted_keys

    # TensorFlow doesn't encode the saved model version in the graph in a
    # reliable way. Try to freeze the graph using V2 utils. If that fails, freeze
    # the graph using V1 utils.
    frozen_initializer_graph = None
    try:
        frozen_graph = _freeze_saved_model_v2(concrete_func, control_flow_v2)
    except BaseException:
        if saved_model_dir:
            (frozen_graph, frozen_initializer_graph) = _freeze_saved_model_v1(
                saved_model_dir, saved_model_tags_list, output_node_names)
        else:
            print('Cannot freeze saved model v1.')
            return

    if frozen_graph_dir:
        output_graph = os.path.join(frozen_graph_dir,
                                    common.ARTIFACT_MODEL_JSON_FILE_NAME)
        frozen_file = output_graph + '.frozen'
        with tf.compat.v1.gfile.GFile(frozen_file, 'wb') as f:
            f.write(frozen_graph.as_graph_def().SerializeToString())

    inputs = [x for x in concrete_func.inputs if not x.dtype == 'resource']
    signature = _build_signature_def(frozen_graph, inputs,
                                     concrete_func.outputs,
                                     saved_model_signature)

    define_transform_graph_func()

    tf_version = None
    try:
        tf_version = model.tensorflow_version
    except:  # pylint: disable=W0702
        # Keras models do not have a tensorflow_version attribute; fall back to
        # the installed TensorFlow version.
        tf_version = tf.__version__

    optimize_graph(frozen_graph,
                   signature,
                   output_graph,
                   tf_version,
                   quantization_dtype_map=quantization_dtype_map,
                   skip_op_check=skip_op_check,
                   strip_debug_ops=strip_debug_ops,
                   weight_shard_size_bytes=weight_shard_size_bytes,
                   experiments=experiments,
                   initializer_graph=frozen_initializer_graph,
                   metadata=metadata)
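Hypothetical invocation of the function above via its keras_model branch (the output directory is illustrative):

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
# Exercises the keras_model branch: the model is traced with trace_model_call,
# frozen, and written out as model.json plus binary weight shards.
_convert_tf_saved_model('/tmp/tfjs_model', keras_model=model)
```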
Example #20
def export_saved_model(model,
                       saved_model_path,
                       custom_objects=None,
                       as_text=False,
                       input_signature=None,
                       serving_only=False):
  """Exports a `tf.keras.Model` as a Tensorflow SavedModel.

  Note that at this time, subclassed models can only be saved using
  `serving_only=True`.

  The exported `SavedModel` is a standalone serialization of Tensorflow objects,
  and is supported by TF language APIs and the Tensorflow Serving system.
  To load the model, use the function
  `tf.keras.experimental.load_from_saved_model`.

  The `SavedModel` contains:

  1. a checkpoint containing the model weights.
  2. a `SavedModel` proto containing the Tensorflow backend graph. Separate
     graphs are saved for prediction (serving), train, and evaluation. If
     the model has not been compiled, then only the graph computing predictions
     will be exported.
  3. the model's json config. If the model is subclassed, this will only be
     included if the model's `get_config()` method is overwritten.

  Example:

  ```python
  import tensorflow as tf

  # Create a tf.keras model.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(1, input_shape=[10]))
  model.summary()

  # Save the tf.keras model in the SavedModel format.
  path = '/tmp/simple_keras_model'
  tf.keras.experimental.export_saved_model(model, path)

  # Load the saved keras model back.
  new_model = tf.keras.experimental.load_from_saved_model(path)
  new_model.summary()
  ```

  Args:
    model: A `tf.keras.Model` to be saved. If the model is subclassed, the flag
      `serving_only` must be set to True.
    saved_model_path: a string specifying the path to the SavedModel directory.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions (e.g. custom loss functions).
    as_text: bool, `False` by default. Whether to write the `SavedModel` proto
      in text format. Currently unavailable in serving-only mode.
    input_signature: A possibly nested sequence of `tf.TensorSpec` objects, used
      to specify the expected model inputs. See `tf.function` for more details.
    serving_only: bool, `False` by default. When this is true, only the
      prediction graph is saved.

  Raises:
    NotImplementedError: If the model is a subclassed model, and serving_only is
      False.
    ValueError: If the input signature cannot be inferred from the model.
    AssertionError: If the SavedModel directory already exists and isn't empty.
  """
  warnings.warn('`tf.keras.experimental.export_saved_model` is deprecated '
                'and will be removed in a future version. '
                'Please use `model.save(..., save_format="tf")` or '
                '`tf.keras.models.save_model(..., save_format="tf")`.')
  if serving_only:
    save_lib.save(
        model,
        saved_model_path,
        signatures=saving_utils.trace_model_call(model, input_signature))
  else:
    _save_v1_format(model, saved_model_path, custom_objects, as_text,
                    input_signature)

  try:
    _export_model_json(model, saved_model_path)
  except NotImplementedError:
    logging.warning('Skipped saving model JSON, subclassed model does not have '
                    'get_config() defined.')
Example #21
def export(model,
           saved_model_path,
           custom_objects=None,
           as_text=None,
           input_signature=None,
           serving_only=False):
    """Saves a `tf.keras.Model` into Tensorflow SavedModel format.

  `save_model` generates new files/folders under the `saved_model_path` folder:
  1) a checkpoint containing the model weights.
  2) a saved_model.pb file containing the model's MetaGraphs. The prediction
     graph is always exported. The evaluation and training graphs are exported
     if the following conditions are met:
     - Evaluation: model loss is defined.
     - Training: model is compiled with an optimizer defined under `tf.train`.
       This is because `tf.keras.optimizers.Optimizer` instances cannot be
       saved to checkpoints.
  3) Model's json configuration, if model.get_config() has been implemented.
     This file can be used to reload the model using
     tf.keras.models.model_from_json(). Note that if any custom objects were
     used, they should be passed to the `custom_objects` argument when loading
     the model.

  Model limitations:
  - Sequential and functional models can always be saved.
  - Subclassed models can only be saved when `serving_only=True`. This is due to
    the current implementation copying the model in order to export the training
    and evaluation graphs. Because the topology of subclassed models cannot be
    determined, the subclassed models cannot be cloned. Subclassed models will
    be entirely exportable in the future.

  Note that each mode is exported in separate graphs, so different modes do not
  share variables. To use the train graph with evaluation or prediction graphs,
  create a new checkpoint if variable values have been updated.

  Example:

  ```python
  import tensorflow as tf

  # Create a tf.keras model.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(1, input_shape=[10]))
  model.summary()

  # Save the tf.keras model in the SavedModel format.
  saved_to_path = tf.keras.experimental.export(
        model, '/tmp/my_simple_tf_keras_saved_model')

  # Load the saved keras model back.
  model_prime = tf.keras.experimental.load_from_saved_model(saved_to_path)
  model_prime.summary()
  ```

  Args:
    model: A `tf.keras.Model` to be saved. If the model is subclassed, the flag
      `serving_only` must be set to True.
    saved_model_path: a string specifying the path to the SavedModel directory.
      The SavedModel will be saved to a timestamped folder created within this
      directory.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions (e.g. custom loss functions).
    as_text: whether to write the `SavedModel` proto in text format. Currently
      unavailable in serving-only mode.
    input_signature: A possibly nested sequence of `tf.TensorSpec` objects, used
      to specify the expected model inputs. `input_signature`'s nested structure
      should match the expected nested structure of the inputs to the model. If
      this is not set, this function will attempt to infer the input shapes and
      dtypes from the model. Note that if the model is subclassed, the tensor
      inputs to the call function should be nested in the first argument (this
      is a general requirement for using subclassed models with Keras functions
      .fit(), .predict(), etc.).
    serving_only: Export only the outputs produced from calling the model in
      predict mode. The losses, optimizer, and other training configurations are
      not saved. If the SavedModel will only be used for serving (rather than
      retraining), or if the model is subclassed, this can be set to True.

  Returns:
    String path to the SavedModel folder, a subdirectory of `saved_model_path`.

  Raises:
    NotImplementedError: If the model is a subclassed model, and serving_only is
      False.
    ValueError: If the input signature cannot be inferred from the model.
  """
    export_dir = model_utils.get_timestamped_export_dir(saved_model_path)

    if serving_only:
        save_lib.save(model,
                      export_dir,
                      signatures=saving_utils.trace_model_call(
                          model, input_signature))
    else:
        _save_v1_format(model, export_dir, custom_objects, as_text,
                        input_signature)

    try:
        _export_model_json(model, export_dir)
    except NotImplementedError:
        logging.warning(
            'Skipped saving model JSON, subclassed model does not have '
            'get_config() defined.')

    return export_dir
Example #22
    def dump(cls, path, obj, variables_to_constants=False, output_names=None, *args, **kwargs):
        """
        Extracts a TensorFlow graph from an object *obj* and saves it at *path*. The graph is
        optionally transformed into a simpler representation with all its variables converted to
        constants when *variables_to_constants* is *True*. The saved file contains the graph as a
        protobuf. The accepted types of *obj* greatly depend on the available API versions.

        When the v1 API is found (which is also the case when ``tf.compat.v1`` is available in v2),
        ``Graph``, ``GraphDef`` and ``Session`` objects are accepted. However, when
        *variables_to_constants* is *True*, *obj* must be a session and *output_names* should refer
        to names of operations whose subgraphs are extracted (usually just one).

        For TensorFlow v2, *obj* can also be a compiled keras model, or either a polymorphic or
        concrete function as returned by ``tf.function``. Polymorphic functions either must have a
        defined input signature (``tf.function(input_signature=(...,))``) or they must accept no
        arguments in the first place. See the TensorFlow documentation on `concrete functions
        <https://www.tensorflow.org/guide/concrete_function>`__ for more info.

        *args* and *kwargs* are forwarded to ``tf.train.write_graph`` (v1) or ``tf.io.write_graph``
        (v2).
        """
        tf, tf1, tf_version = cls.import_tf()
        path = get_path(path)
        graph_dir, graph_name = os.path.split(path)

        # default as_text value
        kwargs.setdefault("as_text", path.endswith((".pbtxt", ".pb.txt")))

        # convert keras models and polymorphic functions to concrete functions, v2 only
        if tf_version[0] != "1":
            from tensorflow.python.keras.saving import saving_utils
            from tensorflow.python.eager.def_function import Function
            from tensorflow.python.eager.function import ConcreteFunction

            if isinstance(obj, tf.keras.Model):
                learning_phase_orig = tf.keras.backend.learning_phase()
                tf.keras.backend.set_learning_phase(False)
                model_func = saving_utils.trace_model_call(obj)
                if model_func.function_spec.arg_names and not model_func.input_signature:
                    raise ValueError("when obj is a keras model callable accepting arguments, its "
                        "input signature must be frozen by building the model")
                obj = model_func.get_concrete_function()
                tf.keras.backend.set_learning_phase(learning_phase_orig)

            elif isinstance(obj, Function):
                if obj.function_spec.arg_names and not obj.input_signature:
                    raise ValueError("when obj is a polymorphic function accepting arguments, its "
                        "input signature must be frozen")
                obj = obj.get_concrete_function()

        # convert variables to constants
        if variables_to_constants:
            if tf1 and isinstance(obj, tf1.Session):
                if not output_names:
                    raise ValueError("when variables_to_constants is true, output_names must "
                        "contain operations to export, got '{}' instead".format(output_names))
                obj = tf1.graph_util.convert_variables_to_constants(obj, obj.graph.as_graph_def(),
                    output_names)

            elif tf_version[0] != "1":
                from tensorflow.python.framework import convert_to_constants

                if not isinstance(obj, ConcreteFunction):
                    raise TypeError("when variables_to_constants is true, obj must be a concrete "
                        "or polymorphic function, got '{}' instead".format(obj))
                obj = convert_to_constants.convert_variables_to_constants_v2(obj)

            else:
                raise TypeError("cannot convert variables to constants for object '{}', type not "
                    "understood for TensorFlow version {}".format(obj, tf.__version__))

        # extract the graph
        if tf1 and isinstance(obj, tf1.Session):
            graph = obj.graph
        elif tf_version[0] != "1" and isinstance(obj, ConcreteFunction):
            graph = obj.graph
        else:
            graph = obj

        # write it
        if tf_version[0] == "1":
            return tf1.train.write_graph(graph, graph_dir, graph_name, *args, **kwargs)
        else:
            return tf.io.write_graph(graph, graph_dir, graph_name, *args, **kwargs)
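A hypothetical usage sketch of `dump` (the enclosing formatter class name `GraphFormatter` is assumed, not taken from the snippet):

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
# Traces the Keras model, converts its variables to constants, and writes
# the graph as a binary protobuf, per the v2 branch of dump() above.
GraphFormatter.dump("/tmp/model.pb", model, variables_to_constants=True)
```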
Example #23
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None,
                            custom_objects=None):
    """Creates a TFLiteConverter class from a tf.keras model file.

    Args:
      model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
          None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      custom_objects: Dict mapping names (strings) to custom classes or
        functions to be considered during model deserialization. (default None)

    Returns:
      TFLiteConverter class.
    """
    # Handles Keras when Eager mode is enabled.
    if context.executing_eagerly():
      if input_arrays or output_arrays:
        raise ValueError("`input_arrays` and `output_arrays` are unsupported "
                         "with Eager mode. If your model requires any of these "
                         "parameters, please use disable_eager_execution().")

      _keras.backend.set_learning_phase(False)
      keras_model = _keras.models.load_model(model_file, custom_objects)

      function = _saving_utils.trace_model_call(keras_model)
      concrete_func = function.get_concrete_function()

      frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
          concrete_func)
      _set_tensor_shapes(frozen_func.inputs, input_shapes)
      return cls(frozen_func.graph.as_graph_def(), frozen_func.inputs,
                 frozen_func.outputs)

    # Handles Keras when Eager mode is disabled.
    _keras.backend.clear_session()
    _keras.backend.set_learning_phase(False)
    keras_model = _keras.models.load_model(model_file, custom_objects)
    sess = _keras.backend.get_session()

    # Get input and output tensors.
    if input_arrays:
      input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
    else:
      input_tensors = keras_model.inputs

    if output_arrays:
      output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
    else:
      output_tensors = keras_model.outputs
    _set_tensor_shapes(input_tensors, input_shapes)

    graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    return cls(graph_def, input_tensors, output_tensors)
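A minimal usage sketch of the file-based entry point above, reachable through the TF 1.x compatibility namespace in TF 2.x (paths are illustrative):

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.save('/tmp/model.h5')  # HDF5 file expected by from_keras_model_file
converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(
    '/tmp/model.h5')
tflite_model = converter.convert()
```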
Example #24
def export(
    model, saved_model_path, custom_objects=None, as_text=None,
    input_signature=None, serving_only=False):
  """Saves a `tf.keras.Model` into Tensorflow SavedModel format.

  `save_model` generates new files/folders under the `saved_model_path` folder:
  1) a checkpoint containing the model weights.
  2) a saved_model.pb file containing the model's MetaGraphs. The prediction
     graph is always exported. The evaluation and training graphs are exported
     if the following conditions are met:
     - Evaluation: model loss is defined.
     - Training: model is compiled with an optimizer defined under `tf.train`.
       This is because `tf.keras.optimizers.Optimizer` instances cannot be
       saved to checkpoints.
  3) Model's json configuration, if model.get_config() has been implemented.
     This file can be used to reload the model using
     tf.keras.models.model_from_json(). Note that if any custom objects were
     used, they should be passed to the `custom_objects` argument when loading
     the model.

  Model limitations:
  - Sequential and functional models can always be saved.
  - Subclassed models can only be saved when `serving_only=True`. This is due to
    the current implementation copying the model in order to export the training
    and evaluation graphs. Because the topology of subclassed models cannot be
    determined, the subclassed models cannot be cloned. Subclassed models will
    be entirely exportable in the future.

  Note that each mode is exported in separate graphs, so different modes do not
  share variables. To use the train graph with evaluation or prediction graphs,
  create a new checkpoint if variable values have been updated.

  Example:

  ```python
  import tensorflow as tf

  # Create a tf.keras model.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(1, input_shape=[10]))
  model.summary()

  # Save the tf.keras model in the SavedModel format.
  saved_to_path = tf.keras.experimental.export(
        model, '/tmp/my_simple_tf_keras_saved_model')

  # Load the saved keras model back.
  model_prime = tf.keras.experimental.load_from_saved_model(saved_to_path)
  model_prime.summary()
  ```

  Args:
    model: A `tf.keras.Model` to be saved. If the model is subclassed, the flag
      `serving_only` must be set to True.
    saved_model_path: a string specifying the path to the SavedModel directory.
      The SavedModel will be saved to a timestamped folder created within this
      directory.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions (e.g. custom loss functions).
    as_text: whether to write the `SavedModel` proto in text format. Currently
      unavailable in serving-only mode.
    input_signature: A possibly nested sequence of `tf.TensorSpec` objects, used
      to specify the expected model inputs. `input_signature`'s nested structure
      should match the expected nested structure of the inputs to the model. If
      this is not set, this function will attempt to infer the input shapes and
      dtypes from the model. Note that if the model is subclassed, the tensor
      inputs to the call function should be nested in the first argument (this
      is a general requirement for using subclassed models with Keras functions
      .fit(), .predict(), etc.).
    serving_only: Export only the outputs produced from calling the model in
      predict mode. The losses, optimizer, and other training configurations are
      not saved. If the SavedModel will only be used for serving (rather than
      retraining), or if the model is subclassed, this can be set to True.

  Returns:
    String path to the SavedModel folder, a subdirectory of `saved_model_path`.

  Raises:
    NotImplementedError: If the model is a subclassed model, and serving_only is
      False.
    ValueError: If the input signature cannot be inferred from the model.
  """
  export_dir = model_utils.get_timestamped_export_dir(saved_model_path)

  if serving_only:
    save_lib.save(
        model, export_dir,
        signatures=saving_utils.trace_model_call(model, input_signature))
  else:
    _save_v1_format(model, export_dir, custom_objects, as_text, input_signature)

  try:
    _export_model_json(model, export_dir)
  except NotImplementedError:
    logging.warning('Skipped saving model JSON, subclassed model does not have '
                    'get_config() defined.')

  return export_dir
Example #25
    def from_keras_model_file(cls,
                              model_file,
                              input_arrays=None,
                              input_shapes=None,
                              output_arrays=None,
                              custom_objects=None):
        """Creates a TFLiteConverter class from a tf.keras model file.

    Args:
      model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
          None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      custom_objects: Dict mapping names (strings) to custom classes or
        functions to be considered during model deserialization. (default None)

    Returns:
      TFLiteConverter class.
    """
        # Handles Keras when Eager mode is enabled.
        if context.executing_eagerly():
            if input_arrays or output_arrays:
                raise ValueError(
                    "`input_arrays` and `output_arrays` are unsupported "
                    "with Eager mode. If your model requires any of these "
                    "parameters, please use disable_eager_execution().")

            _keras.backend.set_learning_phase(False)
            keras_model = _keras.models.load_model(model_file, custom_objects)

            function = _saving_utils.trace_model_call(keras_model)
            concrete_func = function.get_concrete_function()

            frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
                concrete_func, lower_control_flow=False)
            _set_tensor_shapes(frozen_func.inputs, input_shapes)
            return cls(frozen_func.graph.as_graph_def(),
                       frozen_func.inputs,
                       frozen_func.outputs,
                       experimental_debug_info_func=_build_debug_info_func(
                           frozen_func.graph))

        # Handles Keras when Eager mode is disabled.
        _keras.backend.clear_session()
        _keras.backend.set_learning_phase(False)
        keras_model = _keras.models.load_model(model_file, custom_objects)
        sess = _keras.backend.get_session()

        # Get input and output tensors.
        if input_arrays:
            input_tensors = _get_tensors_from_tensor_names(
                sess.graph, input_arrays)
        else:
            input_tensors = keras_model.inputs

        if output_arrays:
            output_tensors = _get_tensors_from_tensor_names(
                sess.graph, output_arrays)
        else:
            output_tensors = keras_model.outputs
        _set_tensor_shapes(input_tensors, input_shapes)

        graph_def = _freeze_graph(sess, input_tensors, output_tensors)
        return cls(graph_def,
                   input_tensors,
                   output_tensors,
                   experimental_debug_info_func=_build_debug_info_func(
                       sess.graph))
Example #26
def _convert_tf_saved_model(output_dir,
                            saved_model_dir=None,
                            keras_model=None,
                            signature_def='serving_default',
                            saved_model_tags='serve',
                            quantization_dtype_map=None,
                            skip_op_check=False,
                            strip_debug_ops=False,
                            weight_shard_size_bytes=1024 * 1024 * 4,
                            control_flow_v2=False,
                            experiments=False,
                            metadata=None):
    """Take a SavedModel or KerasModel and convert to Tensorflow.js graph model.

  Args:
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    saved_model_dir: string The saved model directory.
    keras_model: An in-memory Keras model object.
    signature_def: string Name of the SignatureDef to load. Defaults to
      'serving_default'.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
    if signature_def is None:
        signature_def = 'serving_default'

    if not tf.io.gfile.exists(output_dir):
        tf.io.gfile.makedirs(output_dir)
    output_graph = os.path.join(output_dir,
                                common.ARTIFACT_MODEL_JSON_FILE_NAME)

    saved_model_tags_list = None
    if saved_model_tags:
        saved_model_tags_list = saved_model_tags.split(',')

    model = None
    concrete_func = None
    saved_model_signature = None
    if saved_model_dir:
        saved_model_signature = _find_signature(saved_model_dir,
                                                saved_model_tags, signature_def)
        model = _load_model(saved_model_dir, saved_model_tags_list)
        _check_signature_in_model(model, signature_def)
        concrete_func = model.signatures[signature_def]
    elif keras_model:
        model = keras_model
        input_signature = None
        # If the model's call is not a `tf.function`, we first need to get its
        # input signature from the `model_input_signature` method. We can't
        # directly call `trace_model_call` because otherwise the batch
        # dimension is set to None.
        if not isinstance(model.call, def_function.Function):
            # Passing `keep_original_batch_size=True` ensures that the input
            # signature includes the batch dimension specified by the user.
            input_signature = model_input_signature(
                model, keep_original_batch_size=True)
        func = trace_model_call(model, input_signature)
        concrete_func = func.get_concrete_function()
    else:
        raise ValueError(
            'Provide either a saved model or a Keras model to convert.')

    output_node_names = []
    for output_tensor in concrete_func.outputs:
        output_node_names.append(output_tensor.name.split(':')[0])

    # TensorFlow doesn't encode the saved model version in the graph in a
    # reliable way. Try to freeze the graph using V2 utils. If that fails, freeze
    # the graph using V1 utils.
    frozen_initializer_graph = None
    try:
        frozen_graph = _freeze_saved_model_v2(concrete_func, control_flow_v2)
    except BaseException:
        if saved_model_dir:
            (frozen_graph, frozen_initializer_graph) = _freeze_saved_model_v1(
                saved_model_dir, saved_model_tags_list, output_node_names)
        else:
            print('Cannot freeze the saved model with v1 utils.')
            return

    inputs = [x for x in concrete_func.inputs if x.dtype != 'resource']
    signature = _build_signature_def(frozen_graph, inputs,
                                     concrete_func.outputs,
                                     saved_model_signature)

    # Check whether TransformGraph can be imported; this package is available
    # in g3 but not in the OSS version of TensorFlow.
    transform_graph_available = True
    try:
        from tensorflow.tools.graph_transforms import TransformGraph  # pylint: disable=C0415
    except ImportError:
        transform_graph_available = False

    # Define the graph-stripping function when TransformGraph is available;
    # it strips unused nodes from the graph.
    if transform_graph_available:

        def _strip_unused_nodes(frozen_graph, concrete_func,
                                output_node_names):
            # Find the names of the input nodes needed to extract the minimal
            # inference graph. This is particularly useful when the concrete
            # function contains nodes that do not contribute to the inference
            # computation defined by the input/output pair. It also eliminates
            # unsupported-op errors caused by nodes outside of the minimal
            # inference graph.
            input_node_names = []
            input_tensors = {}
            for input_tensor in concrete_func.inputs:
                if input_tensor.dtype != 'resource':
                    op_name = input_tensor.name.split(':')[0]
                    # The graph freezing may turn the original inputs into constants, or
                    # remove them from the graph, so we need to ignore those.
                    try:
                        op = frozen_graph.get_operation_by_name(op_name)
                        if op.type != 'Const':
                            input_node_names.append(op_name)
                            input_tensors[op_name] = input_tensor
                    except KeyError:
                        # The original input was removed when the graph was frozen.
                        continue

            graph_transformations = ['strip_unused_nodes']
            stripped_graph_def = TransformGraph(frozen_graph.as_graph_def(),
                                                input_node_names,
                                                output_node_names,
                                                graph_transformations)

            # The transform graph library cannot handle input nodes that have
            # dynamic shapes, so manually update each node's dtype and shape
            # from the corresponding input tensor.
            for node in stripped_graph_def.node:
                if node.name in input_tensors:
                    if node.attr['shape'] and node.attr['shape'].shape:
                        node.attr['shape'].shape.CopyFrom(
                            input_tensors[node.name].shape.as_proto())
                    if node.attr['dtype'] and node.attr['dtype'].type:
                        node.attr['dtype'].type = input_tensors[
                            node.name].dtype.as_datatype_enum

            with tf.Graph().as_default() as stripped_graph:
                tf.import_graph_def(stripped_graph_def, name='')
                return stripped_graph

        frozen_graph = _strip_unused_nodes(frozen_graph, concrete_func,
                                           output_node_names)

    version = None
    try:
        version = model.tensorflow_version
    except AttributeError:
        # Keras models do not carry a `tensorflow_version` attribute; fall
        # back to the version of the installed TensorFlow package.
        version = tf.__version__

    optimize_graph(frozen_graph,
                   signature,
                   output_graph,
                   version,
                   quantization_dtype_map=quantization_dtype_map,
                   skip_op_check=skip_op_check,
                   strip_debug_ops=strip_debug_ops,
                   weight_shard_size_bytes=weight_shard_size_bytes,
                   experiments=experiments,
                   initializer_graph=frozen_initializer_graph,
                   metadata=metadata)
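
# A minimal driver sketch for the function above, assuming it is called from
# within the tensorflowjs converter module where its helpers are defined; both
# paths and the quantization choice below are illustrative only.
_convert_tf_saved_model(
    output_dir='/tmp/tfjs_model',             # will hold model.json + weight shards
    saved_model_dir='/tmp/saved_model',       # e.g. produced by tf.saved_model.save
    signature_def='serving_default',
    saved_model_tags='serve',
    quantization_dtype_map={'float16': '*'},  # optional: float16-quantize all weights
    weight_shard_size_bytes=4 * 1024 * 1024)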
Example #27
def export_saved_model(model,
                       saved_model_path,
                       custom_objects=None,
                       as_text=False,
                       input_signature=None,
                       serving_only=False):
  """Exports a `tf.keras.Model` as a Tensorflow SavedModel.

  Note that at this time, subclassed models can only be saved using
  `serving_only=True`.

  The exported `SavedModel` is a standalone serialization of TensorFlow objects,
  and is supported by TF language APIs and the TensorFlow Serving system.
  To load the model, use the function
  `tf.keras.experimental.load_from_saved_model`.

  The `SavedModel` contains:

  1. a checkpoint containing the model weights.
  2. a `SavedModel` proto containing the TensorFlow backend graph. Separate
     graphs are saved for prediction (serving), training, and evaluation. If
     the model has not been compiled, then only the graph computing predictions
     will be exported.
  3. the model's json config. If the model is subclassed, this will only be
     included if the model's `get_config()` method is overridden.

  Example:

  ```python
  import tensorflow as tf

  # Create a tf.keras model.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(1, input_shape=[10]))
  model.summary()

  # Save the tf.keras model in the SavedModel format.
  path = '/tmp/simple_keras_model'
  tf.keras.experimental.export_saved_model(model, path)

  # Load the saved keras model back.
  new_model = tf.keras.experimental.load_from_saved_model(path)
  new_model.summary()
  ```

  Args:
    model: A `tf.keras.Model` to be saved. If the model is subclassed, the flag
      `serving_only` must be set to True.
    saved_model_path: a string specifying the path to the SavedModel directory.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions (e.g. custom loss functions).
    as_text: bool, `False` by default. Whether to write the `SavedModel` proto
      in text format. Currently unavailable in serving-only mode.
    input_signature: A possibly nested sequence of `tf.TensorSpec` objects, used
      to specify the expected model inputs. See `tf.function` for more details.
    serving_only: bool, `False` by default. When this is true, only the
      prediction graph is saved.

  Raises:
    NotImplementedError: If the model is a subclassed model, and serving_only is
      False.
    ValueError: If the input signature cannot be inferred from the model.
    AssertionError: If the SavedModel directory already exists and isn't empty.
  """
  if serving_only:
    save_lib.save(
        model,
        saved_model_path,
        signatures=saving_utils.trace_model_call(model, input_signature))
  else:
    _save_v1_format(model, saved_model_path, custom_objects, as_text,
                    input_signature)

  try:
    _export_model_json(model, saved_model_path)
  except NotImplementedError:
    logging.warning('Skipped saving model JSON, subclassed model does not have '
                    'get_config() defined.')
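
# A hedged sketch of the subclassed-model path, which requires
# `serving_only=True` plus an explicit input signature since the input shapes
# cannot be inferred; the model class and export path are illustrative only.
import tensorflow as tf

class TinyModel(tf.keras.Model):

  def __init__(self):
    super(TinyModel, self).__init__()
    self.dense = tf.keras.layers.Dense(1)

  def call(self, inputs):
    return self.dense(inputs)

model = TinyModel()
tf.keras.experimental.export_saved_model(
    model, '/tmp/subclassed_model', serving_only=True,
    input_signature=[tf.TensorSpec(shape=[None, 10], dtype=tf.float32)])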
Example #28
def default_save_signature(layer):
    # Temporarily clear the layer's losses so that tracing the call function
    # does not record them, then restore the original losses afterwards.
    original_losses = _reset_layer_losses(layer)
    fn = saving_utils.trace_model_call(layer)
    # Force tracing now so the concrete function is cached on `fn`.
    fn.get_concrete_function()
    _restore_layer_losses(original_losses)
    return fn
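
# Usage sketch (an assumption: `save_lib` is TensorFlow's saved_model save
# module, as used elsewhere in this file, and `model` is a built Keras model;
# the export path is hypothetical):
#
#     signature = default_save_signature(model)
#     save_lib.save(model, '/tmp/exported_model', signatures=signature)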