Example #1
    def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
        if h5py is None:
            return

        h5_path = self._save_model_dir("test.h5")

        num_hidden = 5
        input_dim = 3
        num_classes = 2
        with tf.Graph().as_default(), self.cached_session():
            ref_model = keras.models.Sequential()
            ref_model.add(
                keras.layers.Dense(num_hidden, input_dim=input_dim, name="d1"))
            ref_model.add(keras.layers.Dense(num_classes, name="d2"))
            ref_model.compile(
                loss=keras.losses.MSE,
                optimizer=optimizer_v1.RMSprop(lr=0.0001),
                metrics=[keras.metrics.categorical_accuracy],
            )

            f_ref_model = h5py.File(h5_path, "w")
            keras.backend.set_value(ref_model.layers[1].bias,
                                    [3.5] * num_classes)
            hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model)

            f_model = h5py.File(h5_path, "r")
            model = keras.models.Sequential()
            model.add(
                keras.layers.Dense(num_hidden + 5,
                                   input_dim=input_dim,
                                   name="d1"))
            model.add(keras.layers.Dense(num_classes, name="d2"))
            model.compile(
                loss=keras.losses.MSE,
                optimizer=optimizer_v1.RMSprop(lr=0.0001),
                metrics=[keras.metrics.categorical_accuracy],
            )
            with self.assertRaises(
                    ValueError,
                    msg="Shape mismatch in layer #0 (named d1) for weight "
                    "d1_1/kernel:0. Weight expects shape (3, 10). "
                    "Received saved weight with shape (3, 5)",
            ):
                hdf5_format.load_weights_from_hdf5_group_by_name(
                    f_model, model)

            hdf5_format.load_weights_from_hdf5_group_by_name(
                f_model, model, skip_mismatch=True)
            self.assertAllClose(
                [3.5] * num_classes,
                keras.backend.get_value(model.layers[1].bias),
            )
Example #2
    def testAssertModelCloneSameObjectsThrowError(self):
        input_arr = np.random.random((1, 3))
        target_arr = np.random.random((1, 3))

        model_graph = tf.Graph()
        clone_graph = tf.Graph()

        # Create two models with different architectures and different optimizers.
        with tf.compat.v1.Session(graph=model_graph):
            inputs = keras.layers.Input(shape=(3, ))
            x = keras.layers.Dense(2)(inputs)
            x = keras.layers.Dense(3)(x)
            model = keras.models.Model(inputs, x)

            model.compile(loss='mse',
                          optimizer=tf.compat.v1.train.AdadeltaOptimizer())
            model.train_on_batch(input_arr, target_arr)

        with tf.compat.v1.Session(graph=clone_graph):
            inputs = keras.layers.Input(shape=(3, ))
            x = keras.layers.Dense(2)(inputs)
            x = keras.layers.Dense(4)(x)
            x = keras.layers.Dense(3)(x)
            clone = keras.models.Model(inputs, x)
            clone.compile(loss='mse',
                          optimizer=optimizer_v1.RMSprop(lr=0.0001))
            clone.train_on_batch(input_arr, target_arr)
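Example #2 builds its models inside explicit v1 graphs and sessions. For comparison, a minimal sketch of the same compile/train_on_batch pattern in TF 2.x eager mode with the v2 optimizer (an illustration, not part of the test; the v2 RMSprop takes learning_rate rather than the legacy lr):

import numpy as np
import tensorflow as tf

input_arr = np.random.random((1, 3)).astype("float32")
target_arr = np.random.random((1, 3)).astype("float32")

# Functional model matching the first model above, built eagerly.
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(2)(inputs)
outputs = tf.keras.layers.Dense(3)(x)
model = tf.keras.Model(inputs, outputs)

model.compile(loss="mse",
              optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-4))
loss = model.train_on_batch(input_arr, target_arr)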
Example #3
    def test_rmsprop(self):
        with self.cached_session():
            self._test_optimizer(optimizer_v1.RMSprop())
            self._test_optimizer(optimizer_v1.RMSprop(decay=1e-3))
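optimizer_v1.RMSprop(decay=1e-3) decays the learning rate as lr / (1 + decay * iterations). A rough v2 equivalent, sketched here under the assumption of a TF 2.x install, expresses the same behaviour with an InverseTimeDecay schedule instead of a decay argument (the numbers below are illustrative):

import tensorflow as tf

# InverseTimeDecay gives initial_lr / (1 + decay_rate * step / decay_steps),
# which with decay_steps=1 matches the legacy `decay` behaviour.
schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=0.001,  # historical RMSprop default lr
    decay_steps=1,
    decay_rate=1e-3,
)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=schedule)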
Example #4
 def testRMSpropCompatibility(self):
   opt_v1 = optimizer_v1.RMSprop()
   opt_v2 = rmsprop.RMSprop()
   self._testOptimizersCompatibility(opt_v1, opt_v2)
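_testOptimizersCompatibility is a helper defined elsewhere in the test class and is not shown here; the test simply exercises the legacy and v2 RMSprop side by side. Their hyperparameters map onto each other under different names, as in this short sketch assuming TF 2.x (values shown are the defaults):

import tensorflow as tf

# Legacy optimizer_v1.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
# roughly corresponds to the v2 optimizer below: lr becomes learning_rate,
# and decay is expressed with a learning-rate schedule instead.
opt_v2 = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-7)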