class TestDistributionStrategyValidation(test.TestCase,
                                         parameterized.TestCase):

  @combinations.generate(
      combinations.times(
          keras_test_lib.all_strategy_combinations_minus_default(),
          combinations.combine(cloning=[True, False])))
  def test_layer_outside_scope(self, distribution, cloning):
    with self.cached_session():
      with self.assertRaisesRegexp(
          ValueError, 'was not created in the distribution strategy'):
        x = keras.layers.Input(shape=(3,), name='input')
        y = keras.layers.Dense(4, name='dense')(x)
        with distribution.scope():
          model = keras.Model(x, y)
          optimizer = gradient_descent.GradientDescentOptimizer(0.001)
          loss = 'mse'
          metrics = ['mae', keras.metrics.CategoricalAccuracy()]
          model.compile(optimizer, loss, metrics=metrics, cloning=cloning)

  @combinations.generate(
      combinations.times(
          keras_test_lib.all_strategy_combinations_minus_default(),
          combinations.combine(cloning=[True, False])))
  def test_model_outside_scope(self, distribution, cloning):
    with self.cached_session():
      with self.assertRaisesRegexp(
          ValueError, 'was not created in the distribution strategy'):
        x = keras.layers.Input(shape=(3,), name='input')
        y = keras.layers.Dense(4, name='dense')(x)
        model = keras.Model(x, y)
        with distribution.scope():
          optimizer = gradient_descent.GradientDescentOptimizer(0.001)
          loss = 'mse'
          metrics = ['mae', keras.metrics.CategoricalAccuracy()]
          model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
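
# Note on the parameterization above: `combinations.combine(...)` expands its
# keyword arguments into a list of parameter dictionaries, and
# `combinations.times(...)` takes the cross product of those lists. A rough,
# illustrative expansion (the real framework adds more bookkeeping) is:
#
#   combinations.combine(cloning=[True, False])
#       -> [{'cloning': True}, {'cloning': False}]
#
#   combinations.times(strategy_cases,
#                      combinations.combine(cloning=[True, False]))
#       -> one generated test case per (strategy, cloning) pair.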


class TestDistributionStrategySaveLoadWeights(test.TestCase,
                                              parameterized.TestCase):

  @combinations.generate(
      combinations.times(
          keras_test_lib.all_strategy_combinations_minus_default(),
          combinations.combine(
              cloning=[True, False],
              optimizer=strategy_combinations.rmsprop_optimizer_keras_v2_fn)))
  def test_save_load_h5(self, distribution, optimizer, cloning):
    with self.cached_session():
      dataset = keras_test_lib.get_dataset(distribution)
      with distribution.scope():
        model = keras_test_lib.get_model()
        model.compile(optimizer(), 'mse', cloning=cloning)
        model.fit(dataset, epochs=1, steps_per_epoch=1)

        weights_file = tempfile.mktemp('.h5')
        model.save_weights(weights_file)

        model_2 = keras_test_lib.get_model()
        model_2.compile(optimizer(), 'mse', cloning=cloning)
        model_2.load_weights(weights_file)
        model_2.predict(
            keras_test_lib.get_predict_dataset(distribution), steps=2)
        model_2.fit(dataset, epochs=1, steps_per_epoch=1)

  @combinations.generate(
      combinations.times(
          keras_test_lib.all_strategy_combinations_minus_default(),
          combinations.combine(
              cloning=[True, False],
              optimizer=strategy_combinations.rmsprop_optimizer_keras_v2_fn)))
  def test_save_load_trackable(self, distribution, optimizer, cloning):
    # TODO(b/123533246): Enable the test for TPU once bug is fixed.
    if (isinstance(distribution,
                   (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and
        distribution.extended.steps_per_run > 1):
      self.skipTest('MultiStep TPU Strategy deadlocks with optimizer restore.')
    with self.cached_session():
      dataset = keras_test_lib.get_dataset(distribution)
      with distribution.scope():
        model = keras_test_lib.get_model()
        model.compile(optimizer(), 'mse', cloning=cloning)
        model.fit(dataset, epochs=1, steps_per_epoch=1)

        weights_file = tempfile.mktemp()
        model.save_weights(weights_file)

        model_2 = keras_test_lib.get_model()
        model_2.compile(optimizer(), 'mse', cloning=cloning)
        model_2.load_weights(weights_file)
        model_2.predict(
            keras_test_lib.get_predict_dataset(distribution), steps=2)
        model_2.fit(dataset, epochs=1, steps_per_epoch=1)
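
# For readers of this excerpt: `keras_test_lib.get_model()`,
# `keras_test_lib.get_dataset()` and `keras_test_lib.get_predict_dataset()`
# are helpers defined elsewhere in the test library. A minimal, hypothetical
# sketch of what such helpers could look like (shapes and structure here are
# assumptions, not the real implementation) is:
#
#   def get_model():
#     x = keras.layers.Input(shape=(3,), name='input')
#     y = keras.layers.Dense(4, name='dense')(x)
#     return keras.Model(x, y)
#
#   def get_dataset(distribution):
#     inputs = np.zeros((10, 3), dtype=np.float32)
#     targets = np.zeros((10, 4), dtype=np.float32)
#     dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
#     return dataset.repeat(100).batch(10)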


class TestDistributionStrategyErrorCases(test.TestCase,
                                         parameterized.TestCase):

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph']))
  def test_validating_dataset_input_tensors_with_shape_mismatch(
      self, distribution):
    with self.cached_session():
      a = constant_op.constant([1, 2], shape=(1, 2))
      b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
      device_map = values.ReplicaDeviceMap(('/device:CPU:0', '/device:GPU:0'))
      x = values.DistributedValues(device_map, (a, b))
      y = values.DistributedValues(device_map, (a, a))
      # Removed device and input tensor shape details from the error message
      # since the order of the device and the corresponding input tensor shape
      # is not deterministic over different runs.
      with self.assertRaisesRegexp(
          ValueError, 'Input tensor shapes do not match for '
          'distributed tensor inputs '
          'DistributedValues:.+'):
        with distribution.scope():
          distributed_training_utils.validate_distributed_dataset_inputs(
              distribution, x, y)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager']))
  def test_validating_dataset_input_tensors_with_dtype_mismatch(
      self, distribution):
    with self.cached_session():
      a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
      b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
      device_map = values.ReplicaDeviceMap(('/device:CPU:0', '/device:GPU:0'))
      x = values.DistributedValues(device_map, (a, b))
      y = values.DistributedValues(device_map, (a, a))
      # Removed device and input tensor dtype details from the error message
      # since the order of the device and the corresponding input tensor dtype
      # is not deterministic over different runs.
      with self.assertRaisesRegexp(
          ValueError, 'Input tensor dtypes do not match for '
          'distributed tensor inputs '
          'DistributedValues:.+'):
        with distribution.scope():
          distributed_training_utils.validate_distributed_dataset_inputs(
              distribution, x, y)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager'],
          cloning=[True, False]))
  def test_unsupported_features(self, distribution, cloning):
    with self.cached_session():
      with distribution.scope():
        model = keras_test_lib.get_model()
        optimizer = gradient_descent.GradientDescentOptimizer(0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer, loss, metrics=metrics, cloning=cloning)

      dataset = keras_test_lib.get_dataset(distribution)

      # Test with validation split.
      with self.assertRaisesRegexp(
          ValueError, '`validation_split` argument is not '
          'supported when input `x` is a dataset or a '
          'dataset iterator.+'):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            validation_split=0.5,
            validation_steps=2)

      # Test with sample weight.
      sample_weight = np.random.random((10,))
      with self.assertRaisesRegexp(
          ValueError, '`sample_weight` argument is not supported when input '
          '`x` is a dataset or a dataset iterator.'):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            sample_weight=sample_weight)

      # Test without specifying the `steps` argument for a dataset with
      # infinite cardinality.
      dataset = dataset.repeat()
      with self.assertRaisesRegexp(
          ValueError, 'When passing an infinitely '
          'repeating dataset, you must specify the '
          '`steps_per_epoch` argument'):
        model.fit(dataset, epochs=1, verbose=0)
      with self.assertRaisesRegexp(
          ValueError, 'When passing an infinitely '
          'repeating dataset, you must specify the '
          '`steps` argument'):
        model.evaluate(dataset, verbose=0)

      with self.assertRaisesRegexp(
          ValueError, 'When passing an infinitely '
          'repeating dataset, you must specify the '
          '`steps` argument'):
        model.predict(dataset, verbose=0)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager'],
          cloning=[True, False]))
  def test_calling_with_unsupported_predefined_callbacks(
      self, distribution, cloning):
    with self.cached_session():
      with distribution.scope():
        model = keras_test_lib.get_model()
        optimizer = gradient_descent.GradientDescentOptimizer(0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer, loss, metrics=metrics, cloning=cloning)

      dataset = keras_test_lib.get_dataset(distribution)

      def schedule(_):
        return 0.001

      with self.assertRaisesRegexp(
          ValueError, 'You must specify a Keras Optimizer V2 when '
          'using'):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            callbacks=[keras.callbacks.LearningRateScheduler(schedule)])

      with self.assertRaisesRegexp(
          ValueError, 'You must specify a Keras Optimizer V2 when '
          'using'):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            callbacks=[keras.callbacks.ReduceLROnPlateau()])

  @combinations.generate(
      combinations.combine(
          distribution=[strategy_combinations.one_device_strategy],
          mode=['eager'],
          cloning=[True, False]))
  def test_distribution_strategy_with_run_eagerly(self, distribution, cloning):
    with distribution.scope():
      x = keras.layers.Input(shape=(1,))
      y = keras.layers.Dense(1, kernel_initializer='ones')(x)
      model = keras.models.Model(x, y)

      err_msg = ('We currently do not support enabling `run_eagerly` with '
                 'distribution strategy.')
      with self.assertRaisesRegex(ValueError, err_msg):
        model.compile('sgd', run_eagerly=True, cloning=cloning)

  # TODO(b/124377929): Remove error assertions once subclassed models
  # are supported in DistributedStrategy.
  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.one_device_strategy,
          ],
          mode=['graph', 'eager'],
          cloning=[True, False]))
  def test_distribution_strategy_on_subclassed_model(self, distribution,
                                                     cloning):
    with distribution.scope():

      class _SimpleMLP(keras.Model):

        def __init__(self, num_labels):
          super(_SimpleMLP, self).__init__()
          self.dense = keras.layers.Dense(num_labels)

        def call(self, inputs):
          return self.dense(inputs)

      model = _SimpleMLP(3)

      if cloning or not context.executing_eagerly():
        with self.assertRaisesRegexp(
            ValueError,
            'We currently do not support distribution strategy with a '
            '`Sequential` model that is created without `input_shape`/'
            '`input_dim` set in its first layer or a subclassed model.'):
          model.compile('sgd', cloning=cloning)
      else:
        model.compile('sgd', cloning=cloning)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.one_device_strategy,
          ],
          mode=['graph', 'eager'],
          cloning=[True, False]))
  def test_distribution_strategy_on_deferred_sequential_model(
      self, distribution, cloning):
    with distribution.scope():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(16, activation='relu'))
      model.add(keras.layers.Dense(3, activation='softmax'))

      if not cloning and context.executing_eagerly():
        model.compile('sgd', cloning=cloning)
      else:
        with self.assertRaisesRegexp(
            ValueError,
            'We currently do not support distribution strategy with a '
            '`Sequential` model that is created without '
            '`input_shape`/`input_dim` set in its first layer or '
            'a subclassed model.'):
          model.compile('sgd', cloning=cloning)

  @combinations.generate(
      keras_test_lib.all_strategy_combinations_minus_default())
  def test_standalone_loss_without_loss_reduction(self, distribution):
    with distribution.scope():
      loss_object = losses.MeanSquaredError()

      with self.assertRaisesRegexp(
          ValueError, 'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE`'):
        y = np.asarray([1, 0])
        loss_object(y, y)


class TestDistributionStrategyErrorCases(test.TestCase,
                                         parameterized.TestCase):

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph']))
  def test_validating_dataset_input_tensors_with_shape_mismatch(
      self, distribution):
    with self.cached_session():
      a = constant_op.constant([1, 2], shape=(1, 2))
      b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
      x = values.DistributedValues((a, b))
      y = values.DistributedValues((a, a))
      # Removed device and input tensor shape details from the error message
      # since the order of the device and the corresponding input tensor shape
      # is not deterministic over different runs.
      with self.assertRaisesRegexp(
          ValueError, 'Input tensor shapes do not match for '
          'distributed tensor inputs '
          'DistributedValues:.+'):
        with distribution.scope():
          distributed_training_utils.validate_distributed_dataset_inputs(
              distribution, x, y)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager']))
  def test_validating_dataset_input_tensors_with_dtype_mismatch(
      self, distribution):
    with self.cached_session():
      a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
      b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
      x = values.DistributedValues((a, b))
      y = values.DistributedValues((a, a))
      # Removed device and input tensor dtype details from the error message
      # since the order of the device and the corresponding input tensor dtype
      # is not deterministic over different runs.
      with self.assertRaisesRegexp(
          ValueError, 'Input tensor dtypes do not match for '
          'distributed tensor inputs '
          'DistributedValues:.+'):
        with distribution.scope():
          distributed_training_utils.validate_distributed_dataset_inputs(
              distribution, x, y)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager']))
  def test_unsupported_features(self, distribution, mode):
    with self.cached_session():
      with distribution.scope():
        model = keras_test_lib.get_model()
        optimizer = gradient_descent.GradientDescentOptimizer(0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer, loss, metrics=metrics)

      dataset = keras_test_lib.get_dataset(distribution)

      # Test with validation split.
      with self.assertRaises(ValueError):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            validation_split=0.5,
            validation_steps=2)

      # Test with sample weight.
      sample_weight = np.random.random((10,))
      with self.assertRaises(ValueError):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            sample_weight=sample_weight)

      # Test without specifying the `steps` argument for a dataset with
      # infinite cardinality.
      dataset = dataset.repeat()
      with self.assertRaises(ValueError):
        model.fit(dataset, epochs=1, verbose=0)
      with self.assertRaises(ValueError):
        model.evaluate(dataset, verbose=0)
      with self.assertRaises(ValueError):
        model.predict(dataset, verbose=0)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.one_device_strategy,
          ],
          mode=['graph', 'eager']))
  def test_distribution_strategy_on_subclassed_model(self, distribution):
    with distribution.scope():

      class _SimpleMLP(keras.Model):

        def __init__(self, num_labels):
          super(_SimpleMLP, self).__init__()
          self.dense = keras.layers.Dense(num_labels)

        def call(self, inputs):
          return self.dense(inputs)

      model = _SimpleMLP(3)

      if not context.executing_eagerly():
        with self.assertRaisesRegexp(
            ValueError,
            'We currently do not support distribution strategy with a '
            '`Sequential` model that is created without `input_shape`/'
            '`input_dim` set in its first layer or a subclassed model.'):
          model.compile('sgd')
      else:
        model.compile('sgd')

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.one_device_strategy,
          ],
          mode=['graph', 'eager']))
  def test_distribution_strategy_on_deferred_sequential_model(
      self, distribution):
    with distribution.scope():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(16, activation='relu'))
      model.add(keras.layers.Dense(3, activation='softmax'))

      if context.executing_eagerly():
        model.compile('sgd')
      else:
        with self.assertRaisesRegexp(
            ValueError,
            'We currently do not support distribution strategy with a '
            '`Sequential` model that is created without '
            '`input_shape`/`input_dim` set in its first layer or '
            'a subclassed model.'):
          model.compile('sgd')

  @combinations.generate(
      keras_test_lib.all_strategy_combinations_minus_default())
  def test_standalone_loss_without_loss_reduction(self, distribution):
    with distribution.scope():
      loss_object = losses.MeanSquaredError()

      with self.assertRaisesRegexp(
          ValueError, 'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE`'):
        y = np.asarray([1, 0])
        loss_object(y, y)