Code example #1
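These snippets are taken from the Keras distribution-strategy test suite and depend on a few Keras-internal test helpers. The import preamble below is a minimal sketch for reference only; the module paths for keras_test_lib, optimizer_combinations, and distributed_training_utils_v1 are assumptions based on the keras.distribute package layout, not copied from the original test file.

# Assumed import preamble for the snippets in this section.  The three
# keras.distribute helper paths are assumptions, not verbatim imports.
import tempfile

import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized

import keras
from keras import losses
from keras.distribute import distributed_training_utils_v1
from keras.distribute import keras_test_lib
from keras.distribute import optimizer_combinations
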
class TestDistributionStrategySaveLoadWeights(tf.test.TestCase,
                                              parameterized.TestCase):
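    # Run this test under every distribution strategy except the default
    # strategy, paired with the Keras V2 RMSprop optimizer.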
    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.times(
            keras_test_lib.all_strategy_combinations_minus_default(),
            tf.__internal__.test.combinations.combine(
                optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn)
        ))
    def test_save_load_h5(self, distribution, optimizer):
        with self.cached_session():
            dataset = keras_test_lib.get_dataset(distribution)
            with distribution.scope():
                model = keras_test_lib.get_model()
                model.compile(optimizer(), 'mse')
                model.fit(dataset, epochs=1, steps_per_epoch=1)

                weights_file = tempfile.mktemp('.h5')
                model.save_weights(weights_file)

                model_2 = keras_test_lib.get_model()
                model_2.compile(optimizer(), 'mse')
                model_2.load_weights(weights_file)
                model_2.predict(
                    keras_test_lib.get_predict_dataset(distribution), steps=2)
                model_2.fit(dataset, epochs=1, steps_per_epoch=1)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.times(
            keras_test_lib.all_strategy_combinations_minus_default(),
            tf.__internal__.test.combinations.combine(
                optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn)
        ))
    def test_save_load_trackable(self, distribution, optimizer):
        # TODO(b/123533246): Enable the test for TPU once bug is fixed
        if (isinstance(distribution,
                       (tf.distribute.experimental.TPUStrategy,
                        tf.compat.v1.distribute.experimental.TPUStrategy))
                and distribution.extended.steps_per_run > 1):
            self.skipTest(
                'MultiStep TPU Strategy deadlocks with optimizer restore.')
        with self.cached_session():
            dataset = keras_test_lib.get_dataset(distribution)
            with distribution.scope():
                model = keras_test_lib.get_model()
                model.compile(optimizer(), 'mse')
                model.fit(dataset, epochs=1, steps_per_epoch=1)

                weights_file = tempfile.mktemp()
                model.save_weights(weights_file)

                model_2 = keras_test_lib.get_model()
                model_2.compile(optimizer(), 'mse')
                model_2.load_weights(weights_file)
                model_2.predict(
                    keras_test_lib.get_predict_dataset(distribution), steps=2)
                model_2.fit(dataset, epochs=1, steps_per_epoch=1)
Code example #2
class TestDistributionStrategyValidation(tf.test.TestCase,
                                         parameterized.TestCase):
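    # Parameterize over every distribution strategy except the default one.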
    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.times(
            keras_test_lib.all_strategy_combinations_minus_default()))
    def test_layer_outside_scope(self, distribution):
        with self.cached_session():
            with self.assertRaisesRegex(
                    ValueError,
                    'was not created in the distribution strategy'):
                x = keras.layers.Input(shape=(3, ), name='input')
                y = keras.layers.Dense(4, name='dense')(x)
                with distribution.scope():
                    model = keras.Model(x, y)
                    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
                        0.001)
                    loss = 'mse'
                    metrics = ['mae', keras.metrics.CategoricalAccuracy()]
                    model.compile(optimizer, loss, metrics=metrics)

    @tf.__internal__.distribute.combinations.generate(
        keras_test_lib.all_strategy_combinations_minus_default())
    def test_model_outside_scope(self, distribution):
        with self.cached_session():
            with self.assertRaisesRegex(
                    ValueError,
                    'was not created in the distribution strategy'):
                x = keras.layers.Input(shape=(3, ), name='input')
                y = keras.layers.Dense(4, name='dense')(x)
                model = keras.Model(x, y)
                with distribution.scope():
                    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
                        0.001)
                    loss = 'mse'
                    metrics = ['mae', keras.metrics.CategoricalAccuracy()]
                    model.compile(optimizer, loss, metrics=metrics)
Code example #3
class TestDistributionStrategyErrorCases(tf.test.TestCase,
                                         parameterized.TestCase):
    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
            ],
            mode=['graph']))
    def test_validating_dataset_input_tensors_with_shape_mismatch(
            self, distribution):
        with self.cached_session():
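            # `a` and `b` have different shapes, so `x` holds mismatched
            # per-replica components while `y` is consistent.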
            a = tf.constant([1, 2], shape=(1, 2))
            b = tf.constant([[1, 2], [1, 2]], shape=(2, 2))
            x = tf.distribute.DistributedValues((a, b))
            y = tf.distribute.DistributedValues((a, a))
            # Removed device and input tensor shape details from the error message
            # since the order of the device and the corresponding input tensor shape
            # is not deterministic over different runs.
            with self.assertRaisesRegex(
                    ValueError, 'Input tensor shapes do not match for '
                    'distributed tensor inputs '
                    'DistributedValues:.+'):
                with distribution.scope():
                    distributed_training_utils_v1.validate_distributed_dataset_inputs(
                        distribution, x, y)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
            ],
            mode=['graph', 'eager']))
    def test_validating_dataset_input_tensors_with_dtype_mismatch(
            self, distribution):
        with self.cached_session():
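            # `a` and `b` share a shape but use different dtypes (int32 vs.
            # float64), so `x` holds mismatched per-replica components.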
            a = tf.constant([1, 2], shape=(1, 2), dtype=tf.int32)
            b = tf.constant([1, 2], shape=(1, 2), dtype=tf.float64)
            x = tf.distribute.DistributedValues((a, b))
            y = tf.distribute.DistributedValues((a, a))
            # Removed device and input tensor dtype details from the error message
            # since the order of the device and the corresponding input tensor dtype
            # is not deterministic over different runs.
            with self.assertRaisesRegex(
                    ValueError, 'Input tensor dtypes do not match for '
                    'distributed tensor inputs '
                    'DistributedValues:.+'):
                with distribution.scope():
                    distributed_training_utils_v1.validate_distributed_dataset_inputs(
                        distribution, x, y)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
            ],
            mode=['graph', 'eager']))
    def test_unsupported_features(self, distribution, mode):
        with self.cached_session():
            with distribution.scope():
                model = keras_test_lib.get_model()
                optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
                loss = 'mse'
                metrics = ['mae']
                model.compile(optimizer, loss, metrics=metrics)

            dataset = keras_test_lib.get_dataset(distribution)
            # Test with validation split
            with self.assertRaises(ValueError):
                model.fit(dataset,
                          epochs=1,
                          steps_per_epoch=2,
                          verbose=0,
                          validation_split=0.5,
                          validation_steps=2)

            # Test with sample weight.
            sample_weight = np.random.random((10, ))
            with self.assertRaises(ValueError):
                model.fit(dataset,
                          epochs=1,
                          steps_per_epoch=2,
                          verbose=0,
                          sample_weight=sample_weight)

            # Test with not specifying the `steps` argument for dataset with infinite
            # cardinality.
            dataset = dataset.repeat()
            with self.assertRaises(ValueError):
                model.fit(dataset, epochs=1, verbose=0)
            with self.assertRaises(ValueError):
                model.evaluate(dataset, verbose=0)

            with self.assertRaises(ValueError):
                model.predict(dataset, verbose=0)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
                tf.__internal__.distribute.combinations.one_device_strategy,
            ],
            mode=['graph', 'eager']))
    def test_distribution_strategy_on_subclassed_model(self, distribution):
        with distribution.scope():

            class _SimpleMLP(keras.Model):
                def __init__(self, num_labels):
                    super(_SimpleMLP, self).__init__()
                    self.dense = keras.layers.Dense(num_labels)

                def call(self, inputs):
                    return self.dense(inputs)

            model = _SimpleMLP(3)

            if not tf.executing_eagerly():
                with self.assertRaisesRegex(
                        ValueError,
                        'We currently do not support distribution strategy with a '
                        '`Sequential` model that is created without `input_shape`/'
                        '`input_dim` set in its first layer or a subclassed model.'
                ):
                    model.compile('sgd')
            else:
                model.compile('sgd')

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
                tf.__internal__.distribute.combinations.one_device_strategy,
            ],
            mode=['graph', 'eager']))
    def test_distribution_strategy_on_deferred_sequential_model(
            self, distribution):
        with distribution.scope():
            model = keras.models.Sequential()
            model.add(keras.layers.Dense(16, activation='relu'))
            model.add(keras.layers.Dense(3, activation='softmax'))

            if tf.executing_eagerly():
                model.compile('sgd')
            else:
                with self.assertRaisesRegex(
                        ValueError,
                        'We currently do not support distribution strategy with a '
                        '`Sequential` model that is created without '
                        '`input_shape`/`input_dim` set in its first layer or '
                        'a subclassed model.'):
                    model.compile('sgd')

    @tf.__internal__.distribute.combinations.generate(
        keras_test_lib.all_strategy_combinations_minus_default())
    def test_standalone_loss_without_loss_reduction(self, distribution):
        with distribution.scope():
            loss_object = losses.MeanSquaredError()

            with self.assertRaisesRegex(
                    ValueError,
                    'Please use `tf.keras.losses.Reduction.SUM` or '
                    '`tf.keras.losses.Reduction.NONE`'):
                y = np.asarray([1, 0])
                loss_object(y, y)
Code example #4
class TestDistributionStrategyErrorCases(tf.test.TestCase,
                                         parameterized.TestCase):
    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
            ],
            mode=["graph"],
        ))
    def test_validating_dataset_input_tensors_with_shape_mismatch(
            self, distribution):
        with self.cached_session():

            @tf.function
            def run():
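                # Return a tensor with a different shape on GPU:0 than on the
                # other replica so that the shape validation fails.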
                ctx = tf.distribute.get_replica_context()
                if ctx.replica_id_in_sync_group.device.endswith("GPU:0"):
                    return tf.constant([[1, 2]])
                else:
                    return tf.constant([[1, 2], [1, 2]])

            x = distribution.run(run)

            # Removed device and input tensor shape details from the error message
            # since the order of the device and the corresponding input tensor shape
            # is not deterministic over different runs.
            with self.assertRaisesRegex(
                    ValueError,
                    "Input tensor shapes do not match for "
                    "distributed tensor inputs "
                    "PerReplica:.+",
            ):
                with distribution.scope():
                    distributed_training_utils_v1.validate_distributed_dataset_inputs(
                        distribution, x, None)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
            ],
            mode=["graph", "eager"],
        ))
    def test_validating_dataset_input_tensors_with_dtype_mismatch(
            self, distribution):
        with self.cached_session():

            @tf.function
            def run():
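                # Return int32 on GPU:0 and float64 on the other replica so
                # that the dtype validation fails.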
                ctx = tf.distribute.get_replica_context()
                if ctx.replica_id_in_sync_group.device.endswith("GPU:0"):
                    return tf.constant([[1, 2]], dtype=tf.int32)
                else:
                    return tf.constant([[1, 2]], dtype=tf.float64)

            x = distribution.run(run)

            # Removed device and input tensor dtype details from the error message
            # since the order of the device and the corresponding input tensor dtype
            # is not deterministic over different runs.
            with self.assertRaisesRegex(
                    ValueError,
                    "Input tensor dtypes do not match for "
                    "distributed tensor inputs "
                    "PerReplica:.+",
            ):
                with distribution.scope():
                    distributed_training_utils_v1.validate_distributed_dataset_inputs(
                        distribution, x, None)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
            ],
            mode=["graph", "eager"],
        ))
    def test_unsupported_features(self, distribution, mode):
        with self.cached_session():
            with distribution.scope():
                model = keras_test_lib.get_model()
                optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
                loss = "mse"
                metrics = ["mae"]
                model.compile(optimizer, loss, metrics=metrics)

            dataset = keras_test_lib.get_dataset(distribution)
            # Test with validation split
            with self.assertRaises(ValueError):
                model.fit(
                    dataset,
                    epochs=1,
                    steps_per_epoch=2,
                    verbose=0,
                    validation_split=0.5,
                    validation_steps=2,
                )

            # Test with sample weight.
            sample_weight = np.random.random((10, ))
            with self.assertRaises(ValueError):
                model.fit(
                    dataset,
                    epochs=1,
                    steps_per_epoch=2,
                    verbose=0,
                    sample_weight=sample_weight,
                )

            # Test with not specifying the `steps` argument for dataset with infinite
            # cardinality.
            dataset = dataset.repeat()
            with self.assertRaises(ValueError):
                model.fit(dataset, epochs=1, verbose=0)
            with self.assertRaises(ValueError):
                model.evaluate(dataset, verbose=0)

            with self.assertRaises(ValueError):
                model.predict(dataset, verbose=0)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
                tf.__internal__.distribute.combinations.one_device_strategy,
            ],
            mode=["graph", "eager"],
        ))
    def test_distribution_strategy_on_subclassed_model(self, distribution):
        with distribution.scope():

            class _SimpleMLP(keras.Model):
                def __init__(self, num_labels):
                    super().__init__()
                    self.dense = keras.layers.Dense(num_labels)

                def call(self, inputs):
                    return self.dense(inputs)

            model = _SimpleMLP(3)

            if not tf.executing_eagerly():
                with self.assertRaisesRegex(
                        ValueError,
                        "We currently do not support distribution strategy with a "
                        "`Sequential` model that is created without `input_shape`/"
                        "`input_dim` set in its first layer or a subclassed model.",
                ):
                    model.compile("sgd")
            else:
                model.compile("sgd")

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.
                mirrored_strategy_with_gpu_and_cpu,
                tf.__internal__.distribute.combinations.one_device_strategy,
            ],
            mode=["graph", "eager"],
        ))
    def test_distribution_strategy_on_deferred_sequential_model(
            self, distribution):
        with distribution.scope():
            model = keras.models.Sequential()
            model.add(keras.layers.Dense(16, activation="relu"))
            model.add(keras.layers.Dense(3, activation="softmax"))

            if tf.executing_eagerly():
                model.compile("sgd")
            else:
                with self.assertRaisesRegex(
                        ValueError,
                        "We currently do not support distribution strategy with a "
                        "`Sequential` model that is created without "
                        "`input_shape`/`input_dim` set in its first layer or "
                        "a subclassed model.",
                ):
                    model.compile("sgd")

    @tf.__internal__.distribute.combinations.generate(
        keras_test_lib.all_strategy_combinations_minus_default())
    def test_standalone_loss_without_loss_reduction(self, distribution):
        with distribution.scope():
            loss_object = losses.MeanSquaredError()

            with self.assertRaisesRegex(
                    ValueError,
                    "Please use `tf.keras.losses.Reduction.SUM` or "
                    "`tf.keras.losses.Reduction.NONE`",
            ):
                y = np.asarray([1, 0])
                loss_object(y, y)