Example 1
  def test_custom_metric_model(self):
    # TODO(b/134519980): Issue with `model.fit` if the model call function uses
    # a `tf.function` in graph mode.
    if not tf.executing_eagerly():
      return

    x = np.random.random((1, 3))
    y = np.random.random((1, 4))

    class CustomMetric(keras.metrics.MeanSquaredError):
      pass

    def zero_metric(y_true, y_pred):
      del y_true, y_pred
      return 0

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(loss='mse', optimizer='SGD',
                  metrics=[CustomMetric(), zero_metric])
    model.fit(x, y)
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    with self.assertRaisesRegex(ValueError, 'custom_objects'):
      keras_load.load(saved_model_dir)

    with generic_utils.CustomObjectScope(
        {'CustomMetric': CustomMetric, 'zero_metric': zero_metric}):
      loaded = keras_load.load(saved_model_dir)

    self.evaluate([v.initializer for v in loaded.variables])
    loaded.fit(x, y)
Example 2
    def test_load_model_with_name_conflict_raises_error(self):
        class LinearModel(SubclassedModelWithConfig):
            pass

        model = LinearModel(2, 3)
        model(np.random.random((5, 10)).astype(np.float32))
        model.save(self.path, save_format='tf')
        with self.assertRaisesRegex(
                RuntimeError,
                'Unable to restore object of class \'LinearModel\''):
            keras_load.load(self.path, compile=True)
Example 3
  def test_custom_metric_model(self):

    class CustomMetric(keras.metrics.MeanSquaredError):
      pass

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(loss='mse', optimizer='rmsprop', metrics=[CustomMetric()])

    saved_model_dir = self._save_model_dir()
    tf.saved_model.save(model, saved_model_dir)
    with self.assertRaisesRegex(ValueError, 'custom_objects'):
      keras_load.load(saved_model_dir)

    keras_load.load(saved_model_dir, compile=False)
Example 4
    def _test_metric_save_and_load(self,
                                   metric,
                                   save_dir,
                                   num_tensor_args,
                                   shape=(1, 5),
                                   test_sample_weight=True):
        with self.cached_session():
            tf.saved_model.save(metric, save_dir)
            loaded = keras_load.load(save_dir)
            self.evaluate([v.initializer for v in loaded.variables])
            self.assertEqual(metric.name, loaded.name)
            self.assertEqual(metric.dtype, loaded.dtype)

            inputs = self.generate_inputs(num_tensor_args, shape)
            actual = self.evaluate(metric(*inputs))
            self.assertAllClose(actual, loaded(*inputs))
            self.assertAllClose(metric.variables, loaded.variables)

            # Test with separate calls to update state and result.
            inputs = self.generate_inputs(num_tensor_args, shape)
            self.evaluate(metric.update_state(*inputs))
            self.evaluate(loaded.update_state(*inputs))
            actual = self.evaluate(metric.result())
            self.assertAllClose(actual, loaded.result())

            if test_sample_weight:
                # Test with sample weights input.
                inputs = self.generate_inputs(num_tensor_args, shape)
                sample_weight = self.generate_inputs(1, [])[0]
                inputs.append(sample_weight)

                actual = self.evaluate(metric(*inputs))
                self.assertAllClose(actual, loaded(*inputs))
            return loaded
Example 5
  def testSaveLayerMultipleInputs(self):
    class CustomLayer(keras.layers.Layer):

      def call(self, *input_list):
        self.add_loss(input_list[-2] * 2, inputs=True)
        return sum(input_list[:-1])  # The test's last input is a non-tensor arg

    # TODO(b/175902133): Models only support one input argument. Also, create a
    # subclassed model because functional/sequential models still have funky
    # behavior when calling with multiple non-nested arguments.
    class CustomModel(keras.Model):

      def build(self, _):
        self.layer = CustomLayer()

      def call(self, inputs):
        inputs = inputs[:]
        inputs.append(object())  # Test that the layer handles non-tensor inputs
        return self.layer(*inputs)

    model = CustomModel()
    inp = [tf.constant(i, shape=[1, 1], dtype=tf.float32)
           for i in range(1, 5)]
    expected = model(inp)
    expected_loss = model.get_losses_for(inp)
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')
    loaded = keras_load.load(saved_model_dir)
    actual = loaded(inp)
    actual_loss = loaded.get_losses_for(inp)
    self.assertAllEqual(self.evaluate(expected),
                        self.evaluate(actual))
    self.assertAllEqual(self.evaluate(expected_loss),
                        self.evaluate(actual_loss))
Example 6
    def test_trainable_weights(self):
        layer = keras.layers.Dense(4, name='custom_layer')
        layer.build([
            3,
        ])
        layer.add_weight('extra_weight',
                         shape=[],
                         initializer=tf.compat.v1.constant_initializer(11),
                         trainable=True)
        layer.add_weight('extra_weight_2',
                         shape=[],
                         initializer=tf.compat.v1.constant_initializer(12),
                         trainable=False)

        saved_model_dir = self._save_model_dir()
        self.evaluate(tf.compat.v1.variables_initializer(layer.variables))
        tf.saved_model.save(layer, saved_model_dir)
        loaded = keras_load.load(saved_model_dir)
        self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))

        equal_attrs = ['name', '_expects_training_arg', 'trainable']
        for attr in equal_attrs:
            self.assertEqual(getattr(layer, attr), getattr(loaded, attr))

        all_close = ['weights', 'trainable_weights', 'non_trainable_weights']
        for attr in all_close:
            self.assertAllClose(self.evaluate(getattr(layer, attr)),
                                self.evaluate(getattr(loaded, attr)))
Example 7
  def test_revived_model_has_save_spec(self):
    model = SubclassedModelWithConfig(2, 3)
    model.predict(np.random.random((5, 10)).astype(np.float32))
    model.save(self.path, save_format='tf')
    revived = keras_load.load(self.path, compile=True)
    self.assertAllEqual(model._get_save_spec(dynamic_batch=False),
                        revived._get_save_spec(dynamic_batch=False))
Example 8
  def testSaveConvLSTM2D(self, stateful):
    data_format = 'channels_first'
    batch, timesteps, channels, rows, cols = 12, 10, 8, 4, 4
    input_arr = np.ones(
        (batch, timesteps, channels, rows, cols)).astype('float32')
    layer = keras.layers.ConvLSTM2D(
        filters=16, kernel_size=(1, 1), data_format=data_format,
        stateful=stateful)
    x = keras.Input(batch_shape=(batch, timesteps, channels, rows, cols))
    y = layer(x)
    model = keras.Model(x, y)

    predict_1 = model(input_arr)
    self.evaluate([v.initializer for v in model.variables])
    saved_model_dir = self._save_model_dir()

    model.save(saved_model_dir, save_format='tf')
    del model

    loaded = keras_load.load(saved_model_dir)
    self.evaluate([v.initializer for v in loaded.variables])
    if stateful:
      loaded.reset_states()
    predict_2 = loaded(input_arr)
    self.assertAllClose(predict_1, predict_2)
Example 9
  def testDisablingBatchNormTrainableBeforeSaving(self):
    # We disable trainable on the batchnorm layers before saving
    model = keras.models.Sequential(
        keras.layers.BatchNormalization(input_shape=(1,)))
    model.trainable = False
    self.evaluate(tf.compat.v1.variables_initializer(model.variables))
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')
    loaded = keras_load.load(saved_model_dir)
    self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
    input_arr = tf.constant([[11], [12], [13]], dtype=tf.float32)
    input_arr2 = tf.constant([[14], [15], [16]], dtype=tf.float32)
    self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0])

    # Trainable should still be disabled after loading
    self.evaluate(loaded(input_arr, training=True))
    if not tf.executing_eagerly():
      self.evaluate(loaded.get_updates_for(input_arr))
    self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0.0])

    # Re-enabling trainable on the loaded model should cause the batchnorm
    # layer to start training again.
    # Note: this only works in v2.
    if tf.executing_eagerly():
      loaded.trainable = True
      self.evaluate(loaded(input_arr, training=True))
      self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0.12])

      self.evaluate(loaded(input_arr2, training=False))
      self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0.12])
Example 10
  def test_must_restore_from_config_registration(self):
    layer = GlobalLayerThatShouldFailIfNotAdded()
    saved_model_dir = self._save_model_dir()
    model = testing_utils.get_model_from_layers(
        [layer], input_shape=[3], model_type='functional')
    model.save(saved_model_dir, save_format='tf')
    _ = keras_load.load(saved_model_dir)
Example 11
  def test_metadata_input_spec(self):
    class LayerWithNestedSpec(keras.layers.Layer):

      def __init__(self):
        super(LayerWithNestedSpec, self).__init__()
        self.input_spec = {
            'a': keras.layers.InputSpec(max_ndim=3, axes={-1: 2}),
            'b': keras.layers.InputSpec(shape=(None, 2, 3), dtype='int32')}

      @property
      def _use_input_spec_as_call_signature(self):
        return True

    layer = LayerWithNestedSpec()
    saved_model_dir = self._save_model_dir()
    model = testing_utils.get_model_from_layers(
        [layer], model_type='subclass')
    model({'a': tf.constant([[2, 4]]),
           'b': tf.ones([1, 2, 3], dtype=tf.int32)})
    model.save(saved_model_dir, save_format='tf')
    loaded_model = keras_load.load(saved_model_dir)
    loaded = loaded_model.layers[-1]
    self.assertEqual(3, loaded.input_spec['a'].max_ndim)
    self.assertEqual({-1: 2}, loaded.input_spec['a'].axes)
    self.assertAllEqual([None, 2, 3], loaded.input_spec['b'].shape)
    self.assertEqual('int32', loaded.input_spec['b'].dtype)
Example 12
  def testBatchNormUpdates(self):
    model = keras.models.Sequential(
        keras.layers.BatchNormalization(input_shape=(1,)))
    self.evaluate(tf.compat.v1.variables_initializer(model.variables))
    saved_model_dir = self._save_model_dir()

    with self.captureWritesToStream(sys.stderr) as captured_logs:
      model.save(saved_model_dir, save_format='tf')
      loaded = keras_load.load(saved_model_dir)

    # Assert that saving does not log deprecation warnings
    # (even if it needs to set learning phase for compat reasons)
    if tf.executing_eagerly():
      self.assertNotIn('deprecated', captured_logs.contents())

    input_arr = tf.constant([[11], [12], [13]], dtype=tf.float32)
    input_arr2 = tf.constant([[14], [15], [16]], dtype=tf.float32)
    self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0])

    self.evaluate(loaded(input_arr, training=True))
    if not tf.executing_eagerly():
      self.evaluate(loaded.get_updates_for(input_arr))
    self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0.12])

    self.evaluate(loaded(input_arr2, training=False))
    if not tf.executing_eagerly():
      self.evaluate(loaded.get_updates_for(input_arr2))
    self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0.12])
Example 13
  def test_compiled_model(self):
    # TODO(b/134519980): Issue with model.fit if the model call function uses
    # a tf.function (Graph mode only).
    if not tf.executing_eagerly():
      return

    input_arr = np.random.random((1, 3))
    target_arr = np.random.random((1, 4))

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    expected_predict = model.predict(input_arr)

    # Compile and save model.
    model.compile('rmsprop', 'mse')
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    loaded = keras_load.load(saved_model_dir)
    actual_predict = loaded.predict(input_arr)
    self.assertAllClose(expected_predict, actual_predict)

    loss_before = loaded.evaluate(input_arr, target_arr)
    loaded.fit(input_arr, target_arr)
    loss_after = loaded.evaluate(input_arr, target_arr)
    self.assertLess(loss_after, loss_before)
    predict = loaded.predict(input_arr)

    ckpt_path = os.path.join(self.get_temp_dir(), 'weights')
    loaded.save_weights(ckpt_path)

    # Ensure that the checkpoint is compatible with the original model.
    model.load_weights(ckpt_path)
    self.assertAllClose(predict, model.predict(input_arr))
Example 14
    def test_revived_sequential(self):
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(5,
                               input_shape=(3, ),
                               kernel_regularizer=regularizers.get('l2')))
        model.add(
            keras.layers.Dense(2, kernel_regularizer=regularizers.get('l2')))

        self.evaluate(tf.compat.v1.variables_initializer(model.variables))

        saved_model_dir = self._save_model_dir()
        model.save(saved_model_dir, save_format='tf')
        loaded = keras_load.load(saved_model_dir)

        self.assertLen(loaded.layers, 2)
        self.assertLen(loaded.losses, 2)

        loaded.pop()

        self.assertLen(loaded.layers, 1)
        self.assertLen(loaded.losses, 1)

        loaded.add(
            keras.layers.Dense(2, kernel_regularizer=regularizers.get('l2')))

        self.assertLen(loaded.layers, 2)
        self.assertLen(loaded.losses, 2)
Example 15
  def testSaveWithRaggedInputs(self):

    class EmbeddingMerger(keras.layers.Layer):

      def __init__(self, list_features, **kwargs):
        super().__init__(**kwargs)
        self._supports_ragged_inputs = True
        self.embeddings = {
            feature: keras.layers.Embedding(10, 3) for feature in list_features}
        self.mean = keras.layers.Lambda(
            tf.reduce_mean, arguments=dict(axis=1))

      def call(self, inputs):
        tensors = [self.embeddings[col](inputs[col]) for col in inputs]
        tensors = [self.mean(inp) for inp in tensors]
        return keras.layers.Add()(tensors)

    list_features = ['feature_1', 'feature_2']
    feature_1 = tf.ragged.constant([[0.], [1, 3]])
    feature_2 = tf.ragged.constant([[1., 2], [4]])
    f = {'feature_1': feature_1,
         'feature_2': feature_2}
    f_inputs = {
        'feature_1': keras.Input(shape=(None,), name='feature_1', ragged=True),
        'feature_2': keras.Input(shape=(None,), name='feature_2', ragged=True)}

    out = EmbeddingMerger(list_features)(f_inputs)
    model = keras.Model(f_inputs, out)
    self.evaluate(tf.compat.v1.variables_initializer(model.variables))
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    loaded = keras_load.load(saved_model_dir)
    self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
    self.assertAllClose(model.predict(f), loaded.predict(f))
Example 16
    def test_revive(self):
        input_shape = None
        if testing_utils.get_model_type() == 'functional':
            input_shape = (2, 3)

        layer_with_config = CustomLayerWithConfig(1., 2)
        layer_without_config = CustomLayerNoConfig(3., 4)
        subclassed_with_config = SubclassedModelWithConfig(4., 6.)
        subclassed_without_config = SubclassedModelNoConfig(7., 8.)

        inputs = keras.Input((2, 3))
        x = CustomLayerWithConfig(1., 2)(inputs)
        x = CustomLayerNoConfig(3., 4)(x)
        x = SubclassedModelWithConfig(4., 6.)(x)
        x = SubclassedModelNoConfig(7., 8.)(x)
        inner_model_functional = keras.Model(inputs, x)

        inner_model_sequential = keras.Sequential([
            CustomLayerWithConfig(1., 2),
            CustomLayerNoConfig(3., 4),
            SubclassedModelWithConfig(4., 6.),
            SubclassedModelNoConfig(7., 8.)
        ])

        class SubclassedModel(keras.Model):
            def __init__(self):
                super(SubclassedModel, self).__init__()
                self.all_layers = [
                    CustomLayerWithConfig(1., 2),
                    CustomLayerNoConfig(3., 4),
                    SubclassedModelWithConfig(4., 6.),
                    SubclassedModelNoConfig(7., 8.)
                ]

            def call(self, inputs):
                x = inputs
                for layer in self.all_layers:
                    x = layer(x)
                return x

        inner_model_subclassed = SubclassedModel()

        layers = [
            layer_with_config, layer_without_config, subclassed_with_config,
            subclassed_without_config, inner_model_functional,
            inner_model_sequential, inner_model_subclassed
        ]
        model = testing_utils.get_model_from_layers(layers,
                                                    input_shape=input_shape)
        # Run data through the Model to create save spec and weights.
        model.predict(np.ones((10, 2, 3)), batch_size=10)

        # Test that the correct checkpointed values are loaded, whether the layer is
        # created from the config or SavedModel.
        layer_with_config.c.assign(2 * layer_with_config.c)
        layer_without_config.c.assign(3 * layer_without_config.c)

        model.save(self.path, save_format='tf')
        revived = keras_load.load(self.path)
        self._assert_revived_correctness(model, revived)
Example 17
  def test_revive_subclassed_with_nested_model(self):
    model = SubclassedModelNoConfig(1., 2.)
    # Run data through the Model to create save spec and weights.
    model.predict(np.ones((10, 2, 3)), batch_size=10)
    model.save(self.path, save_format='tf')
    revived = keras_load.load(self.path)
    self._assert_revived_correctness(model, revived)
Example 18
  def test_load_with_partially_failed_serialization(self):

    class BadCustomLayer(keras.layers.Layer):

      def __call__(self, inputs):
        return inputs

    class Model(keras.models.Model):

      def __init__(self):
        super(Model, self).__init__()
        self.layer = BadCustomLayer()

      @tf.function(
          input_signature=[tf.TensorSpec([None, 1])])
      def call(self, inputs):
        return self.layer(inputs)

    model = Model()
    inp = tf.constant([[1.0]])
    model(inp)
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    loaded = keras_load.load(saved_model_dir)
    self.assertAllEqual([[1.0]], self.evaluate(loaded(inp)))
    with self.assertRaisesRegex(ValueError, 'call function was not serialized'):
      loaded.layer(inp)
Example 19
  def testSaveMultipleInputs(self):
    class CustomLayer(keras.layers.Layer):

      def call(self, *input_list):
        self.add_loss(input_list[-2] * 2, inputs=True)
        return sum(input_list[:-1])  # The test's last input is a non-tensor arg

    class CustomModel(keras.Model):

      def build(self, _):
        self.layer = CustomLayer()

      def call(self, *inputs):
        inputs = list(inputs)
        inputs.append(object())  # Test that the layer handles non-tensor inputs
        return self.layer(*inputs)

    model = CustomModel()
    inp = [tf.constant(i, shape=[1, 1], dtype=tf.float32)
           for i in range(1, 5)]
    expected = model(*inp)
    expected_loss = model.get_losses_for(inp)
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')
    loaded = keras_load.load(saved_model_dir)
    actual = loaded(*inp)
    actual_loss = loaded.get_losses_for(inp)
    self.assertAllEqual(self.evaluate(expected),
                        self.evaluate(actual))
    self.assertAllEqual(self.evaluate(expected_loss),
                        self.evaluate(actual_loss))
Example 20
  def test_wrapped_layer_training(self):
    class Custom(keras.models.Model):

      def __init__(self):
        super(Custom, self).__init__()
        self.layer = LayerWithLearningPhase()

      def call(self, inputs):
        return self.layer(inputs)
    model = Custom()
    x = tf.constant(1., shape=[1, 1])
    expected_default = model(x)
    expected_training_true = model(x, training=True)
    expected_training_false = model(x, training=False)
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    def assert_loaded_model(loaded):
      actual_default = loaded(x)
      actual_training_true = loaded(x, training=True)
      actual_training_false = loaded(x, training=False)
      self.assertAllClose(
          [expected_default, expected_training_true, expected_training_false],
          [actual_default, actual_training_true, actual_training_false])

    assert_loaded_model(keras_load.load(saved_model_dir))
    assert_loaded_model(tf.saved_model.load(saved_model_dir))
Example 21
  def test_trainable_weights(self):
    """Tests that trainable status of individual weights is preserved."""
    layer = keras.layers.Dense(4, name='custom_layer')
    layer.build([None, 3])
    layer.add_weight(
        'extra_weight', shape=[],
        initializer=tf.compat.v1.constant_initializer(11),
        trainable=True)
    layer.add_weight(
        'extra_weight_2', shape=[],
        initializer=tf.compat.v1.constant_initializer(12),
        trainable=False)
    model = keras.Sequential([keras.Input([3,]), layer])

    saved_model_dir = self._save_model_dir()
    self.evaluate(tf.compat.v1.variables_initializer(layer.variables))
    model.save(saved_model_dir, save_format='tf')
    loaded_model = keras_load.load(saved_model_dir)
    self.evaluate(tf.compat.v1.variables_initializer(loaded_model.variables))

    loaded = loaded_model.layers[-1]

    equal_attrs = ['name', '_expects_training_arg', 'trainable']
    for attr in equal_attrs:
      self.assertEqual(getattr(layer, attr), getattr(loaded, attr))

    all_close = ['weights', 'trainable_weights', 'non_trainable_weights']
    for attr in all_close:
      self.assertAllClose(self.evaluate(getattr(layer, attr)),
                          self.evaluate(getattr(loaded, attr)))
Example 22
  def test_revive_unregistered_sequential(self):
    model = UnregisteredCustomSequentialModel()
    x = np.random.random((2, 2, 3)).astype(np.float32)
    model(x)
    model.save(self.path, save_format='tf')
    revived = keras_load.load(self.path)
    self._assert_revived_correctness(model, revived)
Example 23
  def testSaveStatefulRNN(self, unroll):
    batch = 12
    timesteps = 10
    input_dim = 8
    input_arr = np.ones((batch, timesteps, input_dim)).astype('float32')

    cells = [keras.layers.LSTMCell(32), keras.layers.LSTMCell(64)]
    if unroll:
      x = keras.Input(batch_shape=(batch, timesteps, input_dim))
    else:
      x = keras.Input(batch_shape=(batch, None, input_dim))
    layer = keras.layers.RNN(cells, stateful=True, unroll=unroll)
    y = layer(x)

    model = keras.Model(x, y)
    model.compile('rmsprop', 'mse',
                  run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, timesteps, input_dim)).astype('float32'),
        np.zeros((batch, 64)).astype('float32'))

    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    loaded = keras_load.load(saved_model_dir)
    loaded_layer = loaded.layers[1]

    if not tf.executing_eagerly():
      keras.backend.get_session()  # force variable initialization

    self.assertAllClose(layer.states, loaded_layer.states)
    self.assertAllClose(model(input_arr), loaded(input_arr))
Example 24
  def testReviveFunctionalModel(self):

    class CustomAdd(keras.layers.Add):

      def build(self, input_shape):
        self.w = self.add_weight('w', shape=[])
        super(CustomAdd, self).build(input_shape)

      def call(self, inputs):
        outputs = super(CustomAdd, self).call(inputs)
        return outputs * self.w

    input1 = keras.layers.Input(shape=(None, 3), name='input_1')
    input2 = keras.layers.Input(shape=(None, 3), name='input_2')

    d = keras.layers.Dense(4, name='dense_with_two_inbound_nodes')
    output1 = d(input1)
    output2 = d(input2)

    # Use a custom layer in this model to ensure that layers aren't being
    # recreated directly from the config.
    outputs = CustomAdd(name='custom')([output1, output2])
    model = keras.models.Model([input1, input2], outputs, name='save_model')

    self.evaluate(tf.compat.v1.variables_initializer(model.variables))
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    loaded = keras_load.load(saved_model_dir)
    self.assertEqual('save_model', loaded.name)
    self.assertLen(
        loaded.get_layer('dense_with_two_inbound_nodes')._inbound_nodes, 2)
    self.assertEqual('CustomAdd', type(loaded.get_layer('custom')).__name__)
    self.assertLen(loaded.get_layer('custom').weights, 1)
Example 25
  def test_revive_subclassed_with_sparse_model(self):
    model = SubclassedSparseModelNoConfig(1., 2.)
    # Run data through the Model to create save spec and weights.
    x = tf.sparse.from_dense(np.ones((10, 2, 3), dtype=np.float32))
    model.predict(x, batch_size=10)
    model.save(self.path, save_format='tf')
    revived = keras_load.load(self.path)
    self._assert_revived_correctness(model, revived)
Example 26
    def test_custom_metric_model(self):
        class CustomMetric(keras.metrics.MeanSquaredError):
            pass

        with self.cached_session():
            metric = CustomMetric()
            model = testing_utils.get_small_mlp(1, 4, input_dim=3)
            model.compile(loss='mse', optimizer='rmsprop', metrics=[metric])
            self.evaluate(tf.compat.v1.global_variables_initializer())
            self.evaluate([v.initializer for v in metric.variables])

            saved_model_dir = self._save_model_dir()
            tf.saved_model.save(model, saved_model_dir)
        with self.assertRaisesRegex(ValueError, 'custom_objects'):
            keras_load.load(saved_model_dir)

        keras_load.load(saved_model_dir, compile=False)
Example 27
  def test_save_without_tracing(self):

    class DoNotTrace(keras.layers.Layer):

      def __init__(self):
        super(DoNotTrace, self).__init__()
        self.input_spec = keras.layers.InputSpec(shape=[None])
        self.built = True

      def call(self, inputs):
        raise ValueError('I said do not trace')

      def get_config(self):
        return {}

      @property
      def _use_input_spec_as_call_signature(self):
        return True

    root = keras.models.Sequential()
    root.add(keras.layers.Input(shape=(3,)))
    root.attached_layer = DoNotTrace()

    saved_model_dir = self._save_model_dir()

    # With the default settings, the call function is traced.
    with self.assertRaisesRegex(ValueError, 'do not trace'):
      root.save(saved_model_dir, save_format='tf')

    # When saving the config only, the layer call function should not be
    # traced.
    root.save(saved_model_dir, save_format='tf', save_traces=False)
    loaded = tf.saved_model.load(saved_model_dir)
    self.assertTrue(hasattr(loaded, 'attached_layer'))

    # This should raise an error when loaded without the custom object
    loaded = keras_load.load(saved_model_dir)
    with self.assertRaisesRegex(ValueError, 'Cannot call custom layer'):
      loaded.attached_layer(tf.constant([1.]))

    # Try loading with the custom objects
    with generic_utils.CustomObjectScope({'DoNotTrace': DoNotTrace}):
      loaded = keras_load.load(saved_model_dir)
    with self.assertRaisesRegex(ValueError, 'I said do not trace'):
      loaded.attached_layer(tf.constant([1.]))
Example 28
File: save.py Project: xhh315/keras
def load_model(filepath, custom_objects=None, compile=True, options=None):  # pylint: disable=redefined-builtin
    """Loads a model saved via `model.save()`.

    Usage:

    >>> model = tf.keras.Sequential([
    ...     tf.keras.layers.Dense(5, input_shape=(3,)),
    ...     tf.keras.layers.Softmax()])
    >>> model.save('/tmp/model')
    >>> loaded_model = tf.keras.models.load_model('/tmp/model')
    >>> x = tf.random.uniform((10, 3))
    >>> assert np.allclose(model.predict(x), loaded_model.predict(x))

    Note that the model weights may have different scoped names after being
    loaded. Scoped names include the model/layer names, such as
    `"dense_1/kernel:0"`. It is recommended that you use the layer properties to
    access specific variables, e.g. `model.get_layer("dense_1").kernel`.

    Args:
        filepath: One of the following:
            - String or `pathlib.Path` object, path to the saved model
            - `h5py.File` object from which to load the model
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.
        compile: Boolean, whether to compile the model
            after loading.
        options: Optional `tf.saved_model.LoadOptions` object that specifies
          options for loading from SavedModel.

    Returns:
        A Keras model instance. If the original model was compiled, and saved with
        the optimizer, then the returned model will be compiled. Otherwise, the
        model will be left uncompiled. In the case that an uncompiled model is
        returned, a warning is displayed if the `compile` argument is set to
        `True`.

    Raises:
        ImportError: if loading from an hdf5 file and h5py is not available.
        IOError: In case of an invalid savefile.
    """
    with generic_utils.SharedObjectLoadingScope():
        with generic_utils.CustomObjectScope(custom_objects or {}):
            with load_context.load_context(options):
                if (h5py is not None and (isinstance(filepath, h5py.File)
                                          or h5py.is_hdf5(filepath))):
                    return hdf5_format.load_model_from_hdf5(
                        filepath, custom_objects, compile)

                filepath = path_to_string(filepath)
                if isinstance(filepath, six.string_types):
                    loader_impl.parse_saved_model(filepath)
                    return saved_model_load.load(filepath, compile, options)

    raise IOError(
        'Unable to load model. Filepath is not an hdf5 file (or h5py is not '
        'available) or SavedModel.')
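
The `custom_objects` argument of `load_model` is what several of the test examples above exercise: a SavedModel whose compile config references a custom class cannot be fully revived unless that class is supplied at load time, either through `custom_objects` or a `CustomObjectScope` (passing `compile=False` sidesteps the issue). The snippet below is a minimal sketch of that public-API workflow, assuming TF 2.x defaults; the `CustomMetric` class and the '/tmp/custom_metric_model' path are illustrative and not taken from the sources above.

import numpy as np
import tensorflow as tf


class CustomMetric(tf.keras.metrics.MeanSquaredError):
  # Hypothetical custom metric; any user-defined subclass behaves the same way.
  pass


# Build, compile with the custom metric, and save in the SavedModel format.
model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
model.compile(loss='mse', optimizer='sgd', metrics=[CustomMetric()])
model.fit(np.random.random((4, 3)), np.random.random((4, 4)), verbose=0)
model.save('/tmp/custom_metric_model', save_format='tf')

# Reloading without the custom class is expected to fail with an error that
# mentions `custom_objects` (as the tests above assert); supplying the class
# lets Keras re-create the compiled metric.
loaded = tf.keras.models.load_model(
    '/tmp/custom_metric_model',
    custom_objects={'CustomMetric': CustomMetric})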
Example 29
    def test_must_restore_from_config_custom_object_scope(self):
        class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
            _must_restore_from_config = True

        layer = LayerThatShouldFailIfNotAdded()
        saved_model_dir = self._save_model_dir()
        tf.saved_model.save(layer, saved_model_dir)
        with generic_utils.CustomObjectScope(
            {'LayerThatShouldFailIfNotAdded': LayerThatShouldFailIfNotAdded}):
            _ = keras_load.load(saved_model_dir)
Example 30
  def test_revive_sequential_inputs(self):
    model = keras.models.Sequential([
        keras.Input((None,), dtype=tf.string),
        keras.layers.Lambda(tf.strings.lower)
    ])
    model.save(self.path, save_format='tf')
    revived = keras_load.load(self.path)
    revived_layers = list(
        revived._flatten_layers(include_self=False, recursive=False))
    self.assertEqual(tf.string, revived_layers[0].dtype)