Example #1
    def test_trace_multi_io_model_outputs(self):
        input_dim = 5
        num_classes = 3
        num_classes_b = 4
        input_a = keras.layers.Input(shape=(input_dim, ), name='input_a')
        input_b = keras.layers.Input(shape=(input_dim, ), name='input_b')

        dense = keras.layers.Dense(num_classes, name='dense')
        dense2 = keras.layers.Dense(num_classes_b, name='dense2')
        dropout = keras.layers.Dropout(0.5, name='dropout')
        branch_a = [input_a, dense]
        branch_b = [input_b, dense, dense2, dropout]

        model = test_utils.get_multi_io_model(branch_a, branch_b)

        input_a_ts = tf.constant(
            np.random.random((10, input_dim)).astype(np.float32))
        input_b_ts = tf.constant(
            np.random.random((10, input_dim)).astype(np.float32))

        if test_utils.get_model_type() == 'subclass':
            with self.assertRaisesRegex(ValueError,
                                        '.*input shape is not available'):
                saving_utils.trace_model_call(model)

        model.compile(optimizer='sgd',
                      loss='mse',
                      run_eagerly=test_utils.should_run_eagerly())
        model.fit(x=[
            np.random.random((8, input_dim)).astype(np.float32),
            np.random.random((8, input_dim)).astype(np.float32)
        ],
                  y=[
                      np.random.random((8, num_classes)).astype(np.float32),
                      np.random.random((8, num_classes_b)).astype(np.float32)
                  ],
                  epochs=2)

        fn = saving_utils.trace_model_call(model)
        # tf.function requires that the input structures match when calling a
        # ConcreteFunction. For some reason V1 models define the inputs as a
        # list, while V2 models set the inputs as a tuple.
        if (not tf.executing_eagerly()
                and test_utils.get_model_type() != 'functional'):
            signature_outputs = fn([input_a_ts, input_b_ts])
        else:
            signature_outputs = fn((input_a_ts, input_b_ts))
        outputs = model([input_a_ts, input_b_ts])
        if model.output_names:
            expected_outputs = {
                model.output_names[0]: outputs[0],
                model.output_names[1]: outputs[1]
            }
        else:
            expected_outputs = {'output_1': outputs[0], 'output_2': outputs[1]}
        self._assert_all_close(expected_outputs, signature_outputs)
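
The comment in this example notes that tf.function enforces the argument nesting structure used at trace time when a ConcreteFunction is called, so a list and a tuple holding the same tensors are not interchangeable. Below is a minimal standalone sketch of that behavior using only core TensorFlow; the function and names are illustrative, and the exact exception type may differ between TF versions:

import tensorflow as tf

@tf.function
def add_pair(pair):
    a, b = pair
    return a + b

# Trace once with a tuple of TensorSpecs; the resulting ConcreteFunction is
# keyed on that exact nesting structure.
concrete = add_pair.get_concrete_function(
    (tf.TensorSpec([2], tf.float32), tf.TensorSpec([2], tf.float32)))

x = tf.ones([2])
y = tf.ones([2])

print(concrete((x, y)))  # OK: a tuple matches the traced structure.
try:
    concrete([x, y])     # A list is a different nest structure, so this is
                         # expected to be rejected.
except (TypeError, ValueError) as e:
    print('Structure mismatch:', e)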
Example #2
  def test_specify_input_signature(self):
    model = testing_utils.get_small_sequential_mlp(10, 3, None)
    inputs = tf.ones((8, 5))

    with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
      saving_utils.trace_model_call(model)

    fn = saving_utils.trace_model_call(
        model, [tf.TensorSpec(shape=[None, 5], dtype=tf.float32)])
    signature_outputs = fn(inputs)
    if model.output_names:
      expected_outputs = {model.output_names[0]: model(inputs)}
    else:
      expected_outputs = {'output_1': model(inputs)}
    self._assert_all_close(expected_outputs, signature_outputs)
Example #3
    def test_model_with_fixed_input_dim(self):
        """Ensure that the batch_dim is removed when saving.

        When serving or retraining, it is important to reset the batch dim.
        This can be an issue inside of tf.function. See b/132783590 for context.
        """
        model = test_utils.get_small_mlp(10, 3, 5)

        loss_object = keras.losses.MeanSquaredError()
        optimizer = gradient_descent.SGD()

        @tf.function
        def train_step(data, labels):
            with tf.GradientTape() as tape:
                predictions = model(data)
                loss = loss_object(labels, predictions)
            gradients = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        x = np.random.random((8, 5))
        y = np.random.random((8, 3))

        train_step(x, y)

        fn = saving_utils.trace_model_call(model)
        self.assertEqual(
            fn.structured_input_signature[0][0].shape.as_list(),
            tf.TensorShape([None, 5]).as_list(),
        )
Example #4
    def test_subclassed_model_with_input_signature(self):
        class Model(keras.Model):
            def __init__(self):
                super().__init__()
                self.dense = keras.layers.Dense(3, name="dense")

            @tf.function(
                input_signature=[
                    [
                        tf.TensorSpec([None, 5], tf.float32),
                        tf.TensorSpec([None], tf.float32),
                    ]
                ],
            )
            def call(self, inputs, *args):
                x, y = inputs
                return self.dense(x) + y

        model = Model()
        fn = saving_utils.trace_model_call(model)
        x = tf.ones((8, 5), dtype=tf.float32)
        y = tf.ones((3,), dtype=tf.float32)
        expected_outputs = {"output_1": model([x, y])}
        signature_outputs = fn([x, y])
        self._assert_all_close(expected_outputs, signature_outputs)
Example #5
    def test_trace_multi_io_model_outputs(self):
        input_dim = 5
        num_classes = 3
        num_classes_b = 4
        input_a = keras.layers.Input(shape=(input_dim, ), name='input_a')
        input_b = keras.layers.Input(shape=(input_dim, ), name='input_b')

        dense = keras.layers.Dense(num_classes, name='dense')
        dense2 = keras.layers.Dense(num_classes_b, name='dense2')
        dropout = keras.layers.Dropout(0.5, name='dropout')
        branch_a = [input_a, dense]
        branch_b = [input_b, dense, dense2, dropout]

        model = testing_utils.get_multi_io_model(branch_a, branch_b)

        input_a_np = np.random.random((10, input_dim)).astype(np.float32)
        input_b_np = np.random.random((10, input_dim)).astype(np.float32)

        if testing_utils.get_model_type() == 'subclass':
            with self.assertRaisesRegex(ValueError,
                                        'input shapes have not been set'):
                saving_utils.trace_model_call(model)

        model.compile(optimizer='sgd',
                      loss='mse',
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(x=[
            np.random.random((8, input_dim)).astype(np.float32),
            np.random.random((8, input_dim)).astype(np.float32)
        ],
                  y=[
                      np.random.random((8, num_classes)).astype(np.float32),
                      np.random.random((8, num_classes_b)).astype(np.float32)
                  ],
                  epochs=2)

        fn = saving_utils.trace_model_call(model)
        signature_outputs = fn([input_a_np, input_b_np])
        outputs = model([input_a_np, input_b_np])
        if model.output_names:
            expected_outputs = {
                model.output_names[0]: outputs[0],
                model.output_names[1]: outputs[1]
            }
        else:
            expected_outputs = {'output_1': outputs[0], 'output_2': outputs[1]}
        self._assert_all_close(expected_outputs, signature_outputs)
Example #6
    def test_trace_features_layer(self):
        columns = [tf.feature_column.numeric_column("x")]
        model = sequential.Sequential([dense_features.DenseFeatures(columns)])
        model_input = {"x": tf.constant([[1.0]])}
        model.predict(model_input, steps=1)
        fn = saving_utils.trace_model_call(model)
        self.assertAllClose({"output_1": [[1.0]]}, fn(model_input))

        columns = [
            tf.feature_column.numeric_column("x"),
            tf.feature_column.numeric_column("y"),
        ]
        model = sequential.Sequential([dense_features.DenseFeatures(columns)])
        model_input = {"x": tf.constant([[1.0]]), "y": tf.constant([[2.0]])}
        model.predict(model_input, steps=1)
        fn = saving_utils.trace_model_call(model)
        self.assertAllClose({"output_1": [[1.0, 2.0]]}, fn(model_input))
Example #7
    def test_trace_features_layer(self):
        columns = [tf.feature_column.numeric_column('x')]
        model = sequential.Sequential([dense_features.DenseFeatures(columns)])
        model_input = {'x': tf.constant([[1.]])}
        model.predict(model_input, steps=1)
        fn = saving_utils.trace_model_call(model)
        self.assertAllClose({'output_1': [[1.]]}, fn(model_input))

        columns = [
            tf.feature_column.numeric_column('x'),
            tf.feature_column.numeric_column('y')
        ]
        model = sequential.Sequential([dense_features.DenseFeatures(columns)])
        model_input = {'x': tf.constant([[1.]]), 'y': tf.constant([[2.]])}
        model.predict(model_input, steps=1)
        fn = saving_utils.trace_model_call(model)
        self.assertAllClose({'output_1': [[1., 2.]]}, fn(model_input))
Example #8
  def test_trace_model_outputs(self):
    input_dim = 5 if testing_utils.get_model_type() == 'functional' else None
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    inputs = tf.ones((8, 5))

    if input_dim is None:
      with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
        saving_utils.trace_model_call(model)
      model._set_inputs(inputs)

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn(inputs)
    if model.output_names:
      expected_outputs = {model.output_names[0]: model(inputs)}
    else:
      expected_outputs = {'output_1': model(inputs)}

    self._assert_all_close(expected_outputs, signature_outputs)
Example #9
    def test_trace_model_outputs_after_fitting(self):
        input_dim = 5 if test_utils.get_model_type() == 'functional' else None
        model = test_utils.get_small_mlp(10, 3, input_dim)
        model.compile(optimizer='sgd',
                      loss='mse',
                      run_eagerly=test_utils.should_run_eagerly())
        model.fit(x=np.random.random((8, 5)).astype(np.float32),
                  y=np.random.random((8, 3)).astype(np.float32),
                  epochs=2)

        inputs = tf.ones((8, 5))

        fn = saving_utils.trace_model_call(model)
        signature_outputs = fn(inputs)
        if model.output_names:
            expected_outputs = {model.output_names[0]: model(inputs)}
        else:
            expected_outputs = {'output_1': model(inputs)}

        self._assert_all_close(expected_outputs, signature_outputs)
Example #10
def default_save_signature(layer):
  # Temporarily clear losses recorded on the layer (and its sublayers) so they
  # are not captured while tracing the serving signature.
  original_losses = _reset_layer_losses(layer)
  # Wrap the layer's call in a tf.function and force a concrete trace so the
  # signature is materialized at save time.
  fn = saving_utils.trace_model_call(layer)
  fn.get_concrete_function()
  # Restore the losses that were cleared above.
  _restore_layer_losses(original_losses)
  return fn
Example #11
def export_saved_model(model,
                       saved_model_path,
                       custom_objects=None,
                       as_text=False,
                       input_signature=None,
                       serving_only=False):
    """Exports a `tf.keras.Model` as a Tensorflow SavedModel.

  Note that at this time, subclassed models can only be saved using
  `serving_only=True`.

  The exported `SavedModel` is a standalone serialization of TensorFlow objects,
  and is supported by TF language APIs and the TensorFlow Serving system.
  To load the model, use the function
  `tf.keras.experimental.load_from_saved_model`.

  The `SavedModel` contains:

  1. a checkpoint containing the model weights.
  2. a `SavedModel` proto containing the TensorFlow backend graph. Separate
     graphs are saved for prediction (serving), training, and evaluation. If
     the model has not been compiled, then only the graph computing predictions
     will be exported.
  3. the model's JSON config. If the model is subclassed, this will only be
     included if the model's `get_config()` method is overridden.

  Example:

  ```python
  import tensorflow as tf

  # Create a tf.keras model.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(1, input_shape=[10]))
  model.summary()

  # Save the tf.keras model in the SavedModel format.
  path = '/tmp/simple_keras_model'
  tf.keras.experimental.export_saved_model(model, path)

  # Load the saved keras model back.
  new_model = tf.keras.experimental.load_from_saved_model(path)
  new_model.summary()
  ```

  Args:
    model: A `tf.keras.Model` to be saved. If the model is subclassed, the flag
      `serving_only` must be set to True.
    saved_model_path: a string specifying the path to the SavedModel directory.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions (e.g. custom loss functions).
    as_text: bool, `False` by default. Whether to write the `SavedModel` proto
      in text format. Currently unavailable in serving-only mode.
    input_signature: A possibly nested sequence of `tf.TensorSpec` objects, used
      to specify the expected model inputs. See `tf.function` for more details.
    serving_only: bool, `False` by default. When this is true, only the
      prediction graph is saved.

  Raises:
    NotImplementedError: If the model is a subclassed model, and serving_only is
      False.
    ValueError: If the input signature cannot be inferred from the model.
    AssertionError: If the SavedModel directory already exists and isn't empty.
  """
    warnings.warn('`tf.keras.experimental.export_saved_model` is deprecated '
                  'and will be removed in a future version. '
                  'Please use `model.save(..., save_format="tf")` or '
                  '`tf.keras.models.save_model(..., save_format="tf")`.')
    if serving_only:
        tf.saved_model.save(model,
                            saved_model_path,
                            signatures=saving_utils.trace_model_call(
                                model, input_signature))
    else:
        _save_v1_format(model, saved_model_path, custom_objects, as_text,
                        input_signature)

    try:
        _export_model_json(model, saved_model_path)
    except NotImplementedError:
        logging.warning(
            'Skipped saving model JSON, subclassed model does not have '
            'get_config() defined.')
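
The deprecation warning above points to the replacement API. The sketch below shows that recommended path with a model equivalent to the one in the docstring example; the save path is illustrative:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[10])])

# Replacement for tf.keras.experimental.export_saved_model: save directly in
# the TensorFlow SavedModel format from the model object.
model.save('/tmp/simple_keras_model_tf', save_format='tf')

# Replacement for tf.keras.experimental.load_from_saved_model.
restored = tf.keras.models.load_model('/tmp/simple_keras_model_tf')
restored.summary()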