Example #1
    def test_Bidirectional_with_constants_layer_passing_initial_state(self):
        with self.cached_session():
            # Test basic case.
            x = keras.Input((5, 5))
            c = keras.Input((3, ))
            s_for = keras.Input((32, ))
            s_bac = keras.Input((32, ))
            cell = _RNNCellWithConstants(32, 3)
            custom_objects = {"_RNNCellWithConstants": _RNNCellWithConstants}
            with generic_utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
            y = layer(x, initial_state=[s_for, s_bac], constants=c)
            model = keras.Model([x, s_for, s_bac, c], y)
            model.compile(optimizer="rmsprop", loss="mse")
            model.train_on_batch(
                [
                    np.zeros((6, 5, 5)),
                    np.zeros((6, 32)),
                    np.zeros((6, 32)),
                    np.zeros((6, 3)),
                ],
                np.zeros((6, 64)),
            )

            # Test basic case serialization.
            x_np = np.random.random((6, 5, 5))
            s_fw_np = np.random.random((6, 32))
            s_bk_np = np.random.random((6, 32))
            c_np = np.random.random((6, 3))
            y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
            weights = model.get_weights()
            config = layer.get_config()

            with generic_utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional.from_config(
                    copy.deepcopy(config))
            y = layer(x, initial_state=[s_for, s_bac], constants=c)
            model = keras.Model([x, s_for, s_bac, c], y)
            model.set_weights(weights)
            y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

            # Verify that state is used
            y_np_2_different_s = model.predict(
                [x_np, s_fw_np + 10.0, s_bk_np + 10.0, c_np])
            assert np.mean(y_np - y_np_2_different_s) != 0

            # Test flat list inputs
            with generic_utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional.from_config(
                    copy.deepcopy(config))
            y = layer([x, s_for, s_bac, c])
            model = keras.Model([x, s_for, s_bac, c], y)
            model.set_weights(weights)
            y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
            self.assertAllClose(y_np, y_np_3, atol=1e-4)
Example #2
  def test_custom_metric_model(self):
    # TODO(b/134519980): Issue with `model.fit` if the model call function uses
    # a `tf.function` in graph mode.
    if not tf.executing_eagerly():
      return

    x = np.random.random((1, 3))
    y = np.random.random((1, 4))

    class CustomMetric(keras.metrics.MeanSquaredError):
      pass

    def zero_metric(y_true, y_pred):
      del y_true, y_pred
      return 0

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(loss='mse', optimizer='SGD',
                  metrics=[CustomMetric(), zero_metric])
    model.fit(x, y)
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    with self.assertRaisesRegex(ValueError, 'custom_objects'):
      keras_load.load(saved_model_dir)

    with generic_utils.CustomObjectScope(
        {'CustomMetric': CustomMetric, 'zero_metric': zero_metric}):
      loaded = keras_load.load(saved_model_dir)

    self.evaluate([v.initializer for v in loaded.variables])
    loaded.fit(x, y)
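The test above leans on internal helpers (`testing_utils`, `keras_load`, `self.evaluate`). A minimal, self-contained sketch of the same round trip with the public `tf.keras` API might look like the following; the `CustomMAE` class and the `/tmp/custom_metric_model` path are illustrative assumptions, not part of the original test.

import numpy as np
import tensorflow as tf


class CustomMAE(tf.keras.metrics.MeanAbsoluteError):
    """A trivially subclassed metric that must be registered at load time."""
    pass


model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
model.compile(loss="mse", optimizer="sgd", metrics=[CustomMAE()])
model.fit(np.random.random((8, 3)), np.random.random((8, 4)), verbose=0)
model.save("/tmp/custom_metric_model", save_format="tf")

# Without registering CustomMAE, loading typically fails with a ValueError
# that points at `custom_objects`; registering the class in a scope (or
# passing it via the `custom_objects` argument) lets deserialization succeed.
with tf.keras.utils.custom_object_scope({"CustomMAE": CustomMAE}):
    loaded = tf.keras.models.load_model("/tmp/custom_metric_model")
loaded.fit(np.random.random((8, 3)), np.random.random((8, 4)), verbose=0)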
Example #3
  def test_custom_metric(self, base_cls, num_tensor_args, requires_build):

    class CustomMetric(base_cls):

      def update_state(self, *args):  # pylint: disable=useless-super-delegation
        # Sometimes built-in metrics return an op in update_state. Custom
        # metrics don't support returning ops, so wrap the update_state method
        # while returning nothing.
        super(CustomMetric, self).update_state(*args)

    with self.cached_session():
      metric = CustomMetric()
      save_dir = self._save_model_dir('first_save')

      if requires_build:
        metric(*self.generate_inputs(num_tensor_args))  # pylint: disable=not-callable

      self.evaluate([v.initializer for v in metric.variables])

      with self.assertRaisesRegex(ValueError,
                                  'Unable to restore custom object'):
        self._test_metric_save_and_load(metric, save_dir, num_tensor_args)
      with generic_utils.CustomObjectScope({'CustomMetric': CustomMetric}):
        loaded = self._test_metric_save_and_load(
            metric,
            save_dir,
            num_tensor_args,
            test_sample_weight=False)

        self._test_metric_save_and_load(
            loaded,
            self._save_model_dir('second_save'),
            num_tensor_args,
            test_sample_weight=False)
Example #4
def load_model(filepath, custom_objects=None, compile=True, options=None):  # pylint: disable=redefined-builtin
    """Loads a model saved via `model.save()`.

  Usage:

  >>> model = tf.keras.Sequential([
  ...     tf.keras.layers.Dense(5, input_shape=(3,)),
  ...     tf.keras.layers.Softmax()])
  >>> model.save('/tmp/model')
  >>> loaded_model = tf.keras.models.load_model('/tmp/model')
  >>> x = tf.random.uniform((10, 3))
  >>> assert np.allclose(model.predict(x), loaded_model.predict(x))

  Note that the model weights may have different scoped names after being
  loaded. Scoped names include the model/layer names, such as
  `"dense_1/kernel:0"`. It is recommended that you use the layer properties to
  access specific variables, e.g. `model.get_layer("dense_1").kernel`.

  Args:
      filepath: One of the following:
          - String or `pathlib.Path` object, path to the saved model
          - `h5py.File` object from which to load the model
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.
      compile: Boolean, whether to compile the model
          after loading.
      options: Optional `tf.saved_model.LoadOptions` object that specifies
        options for loading from SavedModel.

  Returns:
      A Keras model instance. If the original model was compiled, and saved with
      the optimizer, then the returned model will be compiled. Otherwise, the
      model will be left uncompiled. In the case that an uncompiled model is
      returned, a warning is displayed if the `compile` argument is set to
      `True`.

  Raises:
      ImportError: if loading from an hdf5 file and h5py is not available.
      IOError: In case of an invalid savefile.
  """
    with generic_utils.SharedObjectLoadingScope():
        with generic_utils.CustomObjectScope(custom_objects or {}):
            with load_context.load_context(options):
                if (h5py is not None and (isinstance(filepath, h5py.File)
                                          or h5py.is_hdf5(filepath))):
                    return hdf5_format.load_model_from_hdf5(
                        filepath, custom_objects, compile)

                filepath = path_to_string(filepath)
                if isinstance(filepath, six.string_types):
                    loader_impl.parse_saved_model(filepath)
                    return saved_model_load.load(filepath, compile, options)

    raise IOError(
        'Unable to load model. Filepath is not an hdf5 file (or h5py is not '
        'available) or SavedModel.')
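As the `custom_objects` argument and the surrounding `CustomObjectScope` suggest, a model that contains user-defined classes can be reloaded either by passing the mapping directly or by entering a scope before calling `load_model`. A hedged sketch of both routes; the `ScaleLayer` class and the `/tmp/scale_model` path are made up for illustration.

import tensorflow as tf


class ScaleLayer(tf.keras.layers.Layer):
    """Illustrative custom layer that multiplies its input by a learned scalar."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.scale = self.add_weight(name="scale", shape=(), initializer="ones")

    def call(self, inputs):
        return inputs * self.scale


model = tf.keras.Sequential([tf.keras.Input(shape=(3,)), ScaleLayer()])
model.save("/tmp/scale_model", save_format="tf")

# Route 1: pass the mapping directly; load_model wraps deserialization in
# CustomObjectScope(custom_objects or {}), as shown above.
loaded_a = tf.keras.models.load_model(
    "/tmp/scale_model", custom_objects={"ScaleLayer": ScaleLayer})

# Route 2: enter the scope yourself, then load without the argument.
with tf.keras.utils.custom_object_scope({"ScaleLayer": ScaleLayer}):
    loaded_b = tf.keras.models.load_model("/tmp/scale_model")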
Example #5
    def test_must_restore_from_config_custom_object_scope(self):
        class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
            _must_restore_from_config = True

        layer = LayerThatShouldFailIfNotAdded()
        saved_model_dir = self._save_model_dir()
        tf.saved_model.save(layer, saved_model_dir)
        with generic_utils.CustomObjectScope(
            {'LayerThatShouldFailIfNotAdded': LayerThatShouldFailIfNotAdded}):
            _ = keras_load.load(saved_model_dir)
Example #6
    def test_Bidirectional_with_constants(self):
        with self.cached_session():
            # Test basic case.
            x = keras.Input((5, 5))
            c = keras.Input((3, ))
            cell = _RNNCellWithConstants(32, 3)
            custom_objects = {"_RNNCellWithConstants": _RNNCellWithConstants}
            with generic_utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
            y = layer(x, constants=c)
            model = keras.Model([x, c], y)
            model.compile(optimizer="rmsprop", loss="mse")
            model.train_on_batch(
                [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 64)))

            # Test basic case serialization.
            x_np = np.random.random((6, 5, 5))
            c_np = np.random.random((6, 3))
            y_np = model.predict([x_np, c_np])
            weights = model.get_weights()
            config = layer.get_config()

            with generic_utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional.from_config(
                    copy.deepcopy(config))
            y = layer(x, constants=c)
            model = keras.Model([x, c], y)
            model.set_weights(weights)
            y_np_2 = model.predict([x_np, c_np])
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

            # Test flat list inputs
            with generic_utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional.from_config(
                    copy.deepcopy(config))
            y = layer([x, c])
            model = keras.Model([x, c], y)
            model.set_weights(weights)
            y_np_3 = model.predict([x_np, c_np])
            self.assertAllClose(y_np, y_np_3, atol=1e-4)
Example #7
  def test_must_restore_from_config_custom_object_scope(self):

    class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
      _must_restore_from_config = True

    layer = LayerThatShouldFailIfNotAdded()
    model = testing_utils.get_model_from_layers(
        [layer], input_shape=[3], model_type='functional')
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')
    with generic_utils.CustomObjectScope(
        {'LayerThatShouldFailIfNotAdded': LayerThatShouldFailIfNotAdded}):
      _ = keras_load.load(saved_model_dir)
Example #8
    def test_custom_metric_wrapped_call(self):
        class NegativeMean(keras.metrics.Mean):
            @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
            def update_state(self, value):
                super(NegativeMean, self).update_state(-value)

        metric = NegativeMean()
        self.evaluate([v.initializer for v in metric.variables])
        with generic_utils.CustomObjectScope({'NegativeMean': NegativeMean}):
            self._test_metric_save_and_load(metric,
                                            self._save_model_dir(),
                                            1,
                                            test_sample_weight=False)
Example #9
def compile_args_from_training_config(training_config, custom_objects=None):
    """Return model.compile arguments from training config."""
    if custom_objects is None:
        custom_objects = {}

    with generic_utils.CustomObjectScope(custom_objects):
        optimizer_config = training_config["optimizer_config"]
        optimizer = optimizers.deserialize(optimizer_config)

        # Recover losses.
        loss = None
        loss_config = training_config.get("loss", None)
        if loss_config is not None:
            loss = _deserialize_nested_config(losses.deserialize, loss_config)

        # Recover metrics.
        metrics = None
        metrics_config = training_config.get("metrics", None)
        if metrics_config is not None:
            metrics = _deserialize_nested_config(
                _deserialize_metric, metrics_config
            )

        # Recover weighted metrics.
        weighted_metrics = None
        weighted_metrics_config = training_config.get("weighted_metrics", None)
        if weighted_metrics_config is not None:
            weighted_metrics = _deserialize_nested_config(
                _deserialize_metric, weighted_metrics_config
            )

        # `training_config` is a plain dict, so membership (not `hasattr`) is
        # the right way to test for an optional key.
        sample_weight_mode = (
            training_config["sample_weight_mode"]
            if "sample_weight_mode" in training_config
            else None
        )
        loss_weights = training_config["loss_weights"]

    return dict(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics,
        weighted_metrics=weighted_metrics,
        loss_weights=loss_weights,
        sample_weight_mode=sample_weight_mode,
    )
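The returned dictionary plugs straight into `model.compile`. A hedged usage sketch; `training_config`, `model`, and `my_custom_loss` are assumed names, with `training_config` read back from a saved model's metadata.

# Hypothetical caller: restore compile() arguments and re-compile a revived
# model; any custom losses/metrics referenced by the config are supplied
# through `custom_objects` so deserialization can find them.
compile_args = compile_args_from_training_config(
    training_config, custom_objects={"my_custom_loss": my_custom_loss})
model.compile(**compile_args)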
Example #10
  def test_save_without_tracing(self):

    class DoNotTrace(keras.layers.Layer):

      def __init__(self):
        super(DoNotTrace, self).__init__()
        self.input_spec = keras.layers.InputSpec(shape=[None])
        self.built = True

      def call(self, inputs):
        raise ValueError('I said do not trace')

      def get_config(self):
        return {}

      @property
      def _use_input_spec_as_call_signature(self):
        return True

    root = keras.models.Sequential()
    root.add(keras.layers.Input(shape=(3,)))
    root.attached_layer = DoNotTrace()

    saved_model_dir = self._save_model_dir()

    # With the default settings, the call function is traced.
    with self.assertRaisesRegex(ValueError, 'do not trace'):
      root.save(saved_model_dir, save_format='tf')

    # When saving the config only, the layer call function should not be
    # traced.
    root.save(saved_model_dir, save_format='tf', save_traces=False)
    loaded = tf.saved_model.load(saved_model_dir)
    self.assertTrue(hasattr(loaded, 'attached_layer'))

    # Without the custom object, loading succeeds, but calling the layer raises.
    loaded = keras_load.load(saved_model_dir)
    with self.assertRaisesRegex(ValueError, 'Cannot call custom layer'):
      loaded.attached_layer(tf.constant([1.]))

    # Try loading with the custom objects
    with generic_utils.CustomObjectScope({'DoNotTrace': DoNotTrace}):
      loaded = keras_load.load(saved_model_dir)
    with self.assertRaisesRegex(ValueError, 'I said do not trace'):
      loaded.attached_layer(tf.constant([1.]))
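Outside of the test harness, the same `save_traces=False` workflow can be sketched with the public API: the layer must provide `get_config`, and its class must be registered (via a scope or `custom_objects`) at load time. The `Halve` class and `/tmp/halve_model` path below are illustrative assumptions.

import tensorflow as tf


class Halve(tf.keras.layers.Layer):
    """Illustrative layer with a config, so it can be saved without traced functions."""

    def call(self, inputs):
        return inputs / 2.0

    def get_config(self):
        return super().get_config()


model = tf.keras.Sequential([tf.keras.Input(shape=(3,)), Halve()])
# With save_traces=False only the config is written, so the class itself has
# to be registered or importable when the model is loaded back.
model.save("/tmp/halve_model", save_format="tf", save_traces=False)

with tf.keras.utils.custom_object_scope({"Halve": Halve}):
    loaded = tf.keras.models.load_model("/tmp/halve_model")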
Example #11
  def test_shared_objects(self):
    class OuterLayer(keras.layers.Layer):

      def __init__(self, inner_layer):
        super(OuterLayer, self).__init__()
        self.inner_layer = inner_layer

      def call(self, inputs):
        return self.inner_layer(inputs)

      def get_config(self):
        return {
            'inner_layer': generic_utils.serialize_keras_object(
                self.inner_layer)
        }

      @classmethod
      def from_config(cls, config):
        return cls(generic_utils.deserialize_keras_object(
            config['inner_layer']))

    class InnerLayer(keras.layers.Layer):

      def __init__(self):
        super(InnerLayer, self).__init__()
        self.v = self.add_weight(name='v', shape=[], dtype=tf.float32)

      def call(self, inputs):
        return self.v + inputs

      @classmethod
      def from_config(cls, config):
        return cls()

    # Create a model with 2 output layers that share the same inner layer.
    inner_layer = InnerLayer()
    outer_layer_1 = OuterLayer(inner_layer)
    outer_layer_2 = OuterLayer(inner_layer)
    input_ = keras.Input(shape=(1,))
    model = keras.Model(
        inputs=input_, outputs=[outer_layer_1(input_), outer_layer_2(input_)])

    # Changes to the shared layer should affect both outputs.
    model.layers[1].inner_layer.v.assign(5)
    self.assertAllEqual(model(1), [6.0, 6.0])
    model.layers[1].inner_layer.v.assign(3)
    self.assertAllEqual(model(1), [4.0, 4.0])

    # After loading, changes to the shared layer should still affect both
    # outputs.
    def _do_assertions(loaded):
      loaded.layers[1].inner_layer.v.assign(5)
      self.assertAllEqual(loaded(1), [6.0, 6.0])
      loaded.layers[1].inner_layer.v.assign(3)
      self.assertAllEqual(loaded(1), [4.0, 4.0])
      loaded.layers[2].inner_layer.v.assign(5)
      self.assertAllEqual(loaded(1), [6.0, 6.0])
      loaded.layers[2].inner_layer.v.assign(3)
      self.assertAllEqual(loaded(1), [4.0, 4.0])

    # We'd like to make sure we only attach shared object IDs when strictly
    # necessary, so we'll recursively traverse the generated config to count
    # whether we have the exact number we expect.
    def _get_all_keys_recursive(dict_or_iterable):
      if isinstance(dict_or_iterable, dict):
        for key in dict_or_iterable.keys():
          yield key
        for key in _get_all_keys_recursive(dict_or_iterable.values()):
          yield key
      elif isinstance(dict_or_iterable, str):
        return
      else:
        try:
          for item in dict_or_iterable:
            for key in _get_all_keys_recursive(item):
              yield key
        # Not an iterable or dictionary
        except TypeError:
          return

    with generic_utils.CustomObjectScope({
        'OuterLayer': OuterLayer, 'InnerLayer': InnerLayer}):

      # Test saving and loading to disk
      save_format = test_utils.get_save_format()
      saved_model_dir = self._save_model_dir()
      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      loaded = keras.models.load_model(saved_model_dir)
      _do_assertions(loaded)

      # Test recreating directly from config
      config = model.get_config()
      key_count = collections.Counter(_get_all_keys_recursive(config))
      self.assertEqual(key_count[generic_utils.SHARED_OBJECT_KEY], 2)
      loaded = keras.Model.from_config(config)
      _do_assertions(loaded)
Example #12
        model.compile('rmsprop', 'mse', 'acc')
        y_true = np.random.randint(0, 3, (5, 1)).astype(np.float32)
        model.train_on_batch(x, y_true)
        model.save(self.path, include_optimizer=True, save_format='tf')
        revived = keras_load.load(self.path, compile=True)
        self.assertAllClose(model.test_on_batch(x, y_true),
                            revived.test_on_batch(x, y_true))

    def test_revived_model_has_save_spec(self):
        model = SubclassedModelWithConfig(2, 3)
        model.predict(np.random.random((5, 10)).astype(np.float32))
        model.save(self.path, save_format='tf')
        revived = keras_load.load(self.path, compile=True)
        self.assertAllEqual(model._get_save_spec(dynamic_batch=False),
                            revived._get_save_spec(dynamic_batch=False))


if __name__ == '__main__':
    tf.compat.v1.enable_eager_execution()
    with generic_utils.CustomObjectScope({
            'CustomLayerWithConfig':
            CustomLayerWithConfig,
            'CustomNetworkWithConfig':
            CustomNetworkWithConfig,
            'CustomNetworkWithConfigName':
            CustomNetworkWithConfigName,
            'SubclassedModelWithConfig':
            SubclassedModelWithConfig
    }):
        tf.test.main()
Example #13
    def test_model(
        self,
        strategy_fn,
        use_operator=False,
        use_regularizer=False,
        policy_name="mixed_float16",
        get_config=False,
        save_format=None,
        use_input_spec=False,
    ):
        self._skip_if_strategy_unsupported(strategy_fn)
        self._skip_if_save_format_unsupported(save_format)
        if use_regularizer:
            weight_regularizer = mp_test_util.IdentityRegularizer()
            activity_regularizer = mp_test_util.ReduceSumRegularizer()
        else:
            weight_regularizer = activity_regularizer = None
        with strategy_fn().scope():
            with policy.policy_scope(policy_name):
                layer = mp_test_util.MultiplyLayer(
                    assert_type=tf.float16,
                    use_operator=use_operator,
                    regularizer=weight_regularizer,
                    activity_regularizer=activity_regularizer,
                    input_shape=(1,),
                )
                if use_input_spec:
                    layer.input_spec = input_spec.InputSpec(shape=(None, 1))
                model = test_utils.get_model_from_layers(
                    [layer], input_shape=(1,), input_dtype=tf.float16
                )
                if get_config:
                    config = model.get_config()
                    model = model.__class__.from_config(
                        config,
                        custom_objects={
                            "MultiplyLayer": mp_test_util.MultiplyLayer
                        },
                    )
                    (layer,) = (
                        layer
                        for layer in model.layers
                        if isinstance(layer, mp_test_util.MultiplyLayer)
                    )

                def loss_fn(y_true, y_pred):
                    del y_true
                    return tf.reduce_mean(y_pred)

                # The learning rate is small enough that, if it were applied to a
                # float16 variable, the variable would not change. So this verifies
                # that the update is applied to the float32 variable, not a float16
                # copy of it.
                opt = gradient_descent.SGD(2**-14)
                # Use a fixed loss scale, as this test will fail if gradients are
                # skipped for a step due to dynamic loss scaling.
                opt = loss_scale_optimizer.LossScaleOptimizer(
                    opt, dynamic=False, initial_scale=8
                )
                model.compile(
                    opt,
                    loss=loss_fn,
                    run_eagerly=test_utils.should_run_eagerly(),
                )

        x = np.ones((2, 1))
        y = np.ones((2, 1))
        dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
        model.fit(dataset)
        # Variable starts at 1, and should have gradient of 2 ** -14 subtracted
        # from it.
        expected = 1 - 2**-14
        if use_regularizer:
            # Weight and activity regularizer each add another 2 ** -14 to the
            # gradient.
            expected -= 2 * 2**-14
        self.assertEqual(backend.eval(layer.v), expected)

        if save_format:
            with generic_utils.CustomObjectScope(
                {
                    "MultiplyLayer": mp_test_util.MultiplyLayer,
                    "loss_fn": loss_fn,
                }
            ):
                self._test_saving(model, dataset, save_format, use_regularizer)
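For the save path guarded by the scope above, a stripped-down sketch with public `tf.keras` APIs might look like this; the layer, loss, and path names are illustrative, and the fixed loss scale mirrors the test's choice.

import numpy as np
import tensorflow as tf

tf.keras.mixed_precision.set_global_policy("mixed_float16")

inputs = tf.keras.Input(shape=(1,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)


def mean_pred_loss(y_true, y_pred):
    del y_true
    return tf.reduce_mean(y_pred)


# Fixed loss scale, as in the test, so no steps are skipped by dynamic scaling.
opt = tf.keras.mixed_precision.LossScaleOptimizer(
    tf.keras.optimizers.SGD(2**-14), dynamic=False, initial_scale=8)
model.compile(opt, loss=mean_pred_loss)
model.fit(np.ones((2, 1)), np.ones((2, 1)), verbose=0)

model.save("/tmp/mp_model", save_format="tf")
# The custom loss must be registered when the compiled model is restored.
with tf.keras.utils.custom_object_scope({"mean_pred_loss": mean_pred_loss}):
    loaded = tf.keras.models.load_model("/tmp/mp_model")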
Example #14
            pass

        model = LinearModel(2, 3)
        model(np.random.random((5, 10)).astype(np.float32))
        model.save(self.path, save_format='tf')
        with self.assertRaisesRegex(
                RuntimeError,
                'Unable to restore object of class \'LinearModel\''):
            keras_load.load(self.path, compile=True)

    def test_load_model_with_name_conflict_registered_works(self):
        model = WideDeepModel(2, 3)
        model(np.random.random((5, 10)).astype(np.float32))
        model.save(self.path, save_format='tf')
        keras_load.load(self.path, compile=True)


if __name__ == '__main__':
    tf.compat.v1.enable_eager_execution()
    with generic_utils.CustomObjectScope({
            'CustomLayerWithConfig': CustomLayerWithConfig,
            'CustomNetworkWithConfig': CustomNetworkWithConfig,
            'CustomNetworkWithConfigName': CustomNetworkWithConfigName,
            'SubclassedModelWithConfig': SubclassedModelWithConfig,
            'FunctionalSubclassModel': FunctionalSubclassModel,
            'FunctionalSubclassModelWrongConfig':
            FunctionalSubclassModelWrongConfig,
            'WideDeepModel': WideDeepModel
    }):
        tf.test.main()