def test_clone_optimizer_in_different_graph(self):
    """Cloning with compile_clone=True across graphs needs optimizer_config.

    Builds and fits a small model in one graph, then verifies that cloning
    it in a *different* graph fails (the original optimizer's variables live
    in the first graph's session) unless the serialized optimizer config is
    passed so a fresh optimizer can be rebuilt in the new graph.
    """
    with tf.Graph().as_default():
        with self.session():
            model = test_utils.get_small_sequential_mlp(3, 4)
            optimizer = keras.optimizers.optimizer_v2.adam.Adam()
            model.compile(
                optimizer,
                "mse",
                metrics=["acc", metrics.categorical_accuracy],
            )
            # One epoch on a single sample is enough to create the
            # optimizer's slot variables in this graph.
            model.fit(
                x=np.array([[1.0, 2.0, 3.0, 4.0]]),
                y=np.array([[1.0, 1.0, 1.0, 1.0]]),
                epochs=1,
            )
            # Capture the serialized optimizer state before leaving the graph.
            optimizer_config = optimizer.get_config()
    with tf.Graph().as_default():
        with self.session():
            # Reusing the original optimizer object across graphs must fail.
            with self.assertRaisesRegex(
                ValueError, "Cannot use the given session"
            ):
                models.clone_and_build_model(model, compile_clone=True)

            # The optimizer_config object allows the model to be cloned in a
            # different graph.
            models.clone_and_build_model(
                model, compile_clone=True, optimizer_config=optimizer_config
            )
def _clone_and_build_test_helper(self, model, model_type):
    """Clone `model` under several input/target configurations and verify
    each clone keeps the compile parameters and can train and evaluate.
    """
    x = np.random.random((10, 4))
    y = np.random.random((10, 4))
    # Subclassed models must be reset in place to be cloned.
    is_subclassed = (model_type == 'subclass')

    def check_clone(**clone_kwargs):
        # Clone with compilation and confirm the clone is fully usable.
        cloned = models.clone_and_build_model(
            model,
            compile_clone=True,
            in_place_reset=is_subclassed,
            **clone_kwargs)
        self._assert_same_compile_params(cloned)
        cloned.train_on_batch(x, y)
        cloned.evaluate(x, y)

    # With placeholder creation (no explicit input tensors).
    check_clone()

    # Create new tensors for inputs.
    input_a = keras.Input(shape=(4,), name='a')
    check_clone(input_tensors=input_a)
    check_clone(input_tensors=input_a, target_tensors=None)
def test_clone_and_build_non_compiled_model(self):
    """Cloning an uncompiled model: compile_clone=True must raise, and the
    uncompiled clone must refuse to train/evaluate until it is compiled.
    """
    x = np.random.random((10, 4))
    y = np.random.random((10, 4))
    model = _get_model()

    # An uncompiled source model cannot produce a compiled clone.
    with self.assertRaisesRegex(ValueError, "has not been compiled"):
        models.clone_and_build_model(model, compile_clone=True)

    is_subclassed = test_utils.get_model_type() == "subclass"

    def check_uncompiled_clone(**clone_kwargs):
        cloned = models.clone_and_build_model(
            model,
            compile_clone=False,
            in_place_reset=is_subclassed,
            **clone_kwargs,
        )
        # Until compiled, both evaluation and training must fail loudly.
        with self.assertRaisesRegex(RuntimeError, "must compile"):
            cloned.evaluate(x, y)
        with self.assertRaisesRegex(RuntimeError, "must compile"):
            cloned.train_on_batch(x, y)
        # After compiling, training works.
        cloned.compile(
            test_utils.get_v2_optimizer("rmsprop"),
            "mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        cloned.train_on_batch(x, y)

    # With placeholder creation (no explicit input tensors).
    check_uncompiled_clone()
    # Create new tensors for inputs.
    check_uncompiled_clone(input_tensors=keras.Input(shape=(4,)))
def assert_optimizer_iterations_increases(self, optimizer):
    """Check that a clone built with `optimizer_iterations` uses it.

    Compiles a model with `optimizer`, clones it while injecting an external
    step counter, and verifies one training batch increments that counter.

    Args:
        optimizer: optimizer instance (or identifier) to compile with.
    """
    model = _get_model()
    model.compile(
        optimizer,
        'mse',
        metrics=['acc', metrics.categorical_accuracy],
        run_eagerly=test_utils.should_run_eagerly())

    # Use the `backend` alias consistently (the original mixed
    # `keras.backend.variable` with `backend.eval`). Start at an arbitrary
    # non-zero value so the increment is unambiguous.
    global_step = backend.variable(123, dtype=tf.int64)
    clone_model = models.clone_and_build_model(
        model,
        compile_clone=True,
        optimizer_iterations=global_step,
        in_place_reset=(test_utils.get_model_type() == 'subclass'))

    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    clone_model.train_on_batch(inp, out)

    # One batch => exactly one optimizer iteration on the injected counter.
    self.assertEqual(backend.eval(global_step), 124)