def testAssertModelCloneSameObjectsIgnoreOptimizer(self):
  """Two structurally identical models with different optimizers must pass.

  Builds the same two-Dense-layer network in two separate graphs, compiles
  each with a *different* optimizer, trains one batch each, and checks that
  `_assert_same_non_optimizer_objects` treats them as equivalent (optimizer
  variables are deliberately ignored by the assertion helper).
  """
  x_batch = np.random.random((1, 3))
  y_batch = np.random.random((1, 3))
  graph_a = ops.Graph()
  graph_b = ops.Graph()

  # Original model: Adadelta optimizer.
  with session.Session(graph=graph_a):
    net_in = keras.layers.Input(shape=(3,))
    hidden = keras.layers.Dense(2)(net_in)
    hidden = keras.layers.Dense(3)(hidden)
    model = keras.models.Model(net_in, hidden)
    model.compile(loss='mse', optimizer=training_module.AdadeltaOptimizer())
    model.train_on_batch(x_batch, y_batch)

  # Clone: identical layer stack, but RMSprop optimizer.
  with session.Session(graph=graph_b):
    net_in = keras.layers.Input(shape=(3,))
    hidden = keras.layers.Dense(2)(net_in)
    hidden = keras.layers.Dense(3)(hidden)
    clone = keras.models.Model(net_in, hidden)
    clone.compile(loss='mse', optimizer=keras.optimizers.RMSprop(lr=0.0001))
    clone.train_on_batch(x_batch, y_batch)

  # Must not raise: only non-optimizer objects are compared.
  keras_saved_model._assert_same_non_optimizer_objects(
      model, graph_a, clone, graph_b)
def testAssertModelCloneSameObjectsThrowError(self):
  """A clone with a different layer stack must trigger the variable check.

  The clone inserts an extra Dense(4) layer, so its variable set differs
  from the original model's; `_assert_same_non_optimizer_objects` is
  expected to raise `errors.InternalError` with the documented message.
  """
  x_batch = np.random.random((1, 3))
  y_batch = np.random.random((1, 3))
  graph_a = ops.Graph()
  graph_b = ops.Graph()

  # Original model: two Dense layers, Adadelta optimizer.
  with session.Session(graph=graph_a):
    net_in = keras.layers.Input(shape=(3, ))
    hidden = keras.layers.Dense(2)(net_in)
    hidden = keras.layers.Dense(3)(hidden)
    model = keras.models.Model(net_in, hidden)
    model.compile(loss='mse', optimizer=training_module.AdadeltaOptimizer())
    model.train_on_batch(x_batch, y_batch)

  # Clone: extra Dense(4) layer, so the variable sets cannot match.
  with session.Session(graph=graph_b):
    net_in = keras.layers.Input(shape=(3, ))
    hidden = keras.layers.Dense(2)(net_in)
    hidden = keras.layers.Dense(4)(hidden)
    hidden = keras.layers.Dense(3)(hidden)
    clone = keras.models.Model(net_in, hidden)
    clone.compile(loss='mse', optimizer=keras.optimizers.RMSprop(lr=0.0001))
    clone.train_on_batch(x_batch, y_batch)

  with self.assertRaisesRegexp(
      errors.InternalError, 'Model and clone must use the same variables.'):
    keras_saved_model._assert_same_non_optimizer_objects(
        model, graph_a, clone, graph_b)