def test_end_to_end_1_gpu(self):
  """Test Keras model with 1 GPU."""
  # Eager mode must be switched on before any other TF API is touched.
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  available_gpus = context.num_gpus()
  if available_gpus < 1:
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available"
        .format(1, available_gpus))
  flag_list = [
      "-num_gpus", "1",
      "-distribution_strategy", "default",
      "-model_dir", "keras_cifar_1_gpu",
      "-data_format", "channels_last",
  ]
  integration.run_synthetic(
      main=keras_cifar_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list + self._extra_flags)
def test_end_to_end_xla_2_gpu_fp16(self):
  """Test Keras model with XLA, 2 GPUs and fp16."""
  # Eager mode must be switched on before any other TF API is touched.
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  available_gpus = context.num_gpus()
  if available_gpus < 2:
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available"
        .format(2, available_gpus))
  flag_list = [
      "-num_gpus", "2",
      "-dtype", "fp16",
      "-enable_xla", "true",
      "-distribution_strategy", "default",
      "-model_dir", "keras_imagenet_xla_2_gpu_fp16",
  ]
  integration.run_synthetic(
      main=keras_imagenet_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list + self._extra_flags)
def test_end_to_end_xla_2_gpu_fp16(self, flags_key):
  """Test Keras model with XLA, 2 GPUs and fp16."""
  # Eager mode must be switched on before any other TF API is touched.
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  available_gpus = context.num_gpus()
  if available_gpus < 2:
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available"
        .format(2, available_gpus))
  flag_list = [
      "-num_gpus", "2",
      "-dtype", "fp16",
      "-enable_xla", "true",
      "-distribution_strategy", "mirrored",
  ]
  flag_list = flag_list + self.get_extra_flags_dict(flags_key)
  # fp16 cannot be combined with the pruning (polynomial_decay) configs yet.
  if "polynomial_decay" in flag_list:
    self.skipTest("Pruning with fp16 is not currently supported.")
  integration.run_synthetic(
      main=resnet_imagenet_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list)
def test_end_to_end(self, distribution):
  """Test Keras MNIST model with `strategy`."""
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  flag_list = [
      "-train_epochs", "1",
      # Let TFDS find the metadata folder automatically
      "--data_dir="
  ]
  # The same ten synthetic examples back both the train and eval splits.
  examples = (
      tf.ones(shape=(10, 28, 28, 1), dtype=tf.int32),
      tf.range(10),
  )
  train_ds = tf.data.Dataset.from_tensor_slices(examples)
  eval_ds = tf.data.Dataset.from_tensor_slices(examples)
  run = functools.partial(
      mnist_main.run,
      datasets_override=(train_ds, eval_ds),
      strategy_override=distribution)
  integration.run_synthetic(
      main=run,
      synth=False,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list)
def test_end_to_end(self, distribution):
  """Test Keras MNIST model with `strategy`."""
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  flag_list = [
      "-train_epochs", "1",
      # Let TFDS find the metadata folder automatically
      "--data_dir="
  ]

  def _mock_dataset(self, *args, **kwargs):  # pylint: disable=unused-argument
    """Generate mock dataset with TPU-compatible dtype (instead of uint8)."""
    return tf.data.Dataset.from_tensor_slices({
        "image": tf.ones(shape=(10, 28, 28, 1), dtype=tf.int32),
        "label": tf.range(10),
    })

  run = functools.partial(mnist_main.run, strategy_override=distribution)
  # Substitute the mocked dataset for the real TFDS download.
  with tfds.testing.mock_data(as_dataset_fn=_mock_dataset):
    integration.run_synthetic(
        main=run,
        synth=False,
        tmp_root=self.get_temp_dir(),
        extra_flags=flag_list)
def test_end_to_end_no_dist_strat(self, flags_key):
  """Test Keras model with 1 GPU, no distribution strategy."""
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  flag_list = ["-distribution_strategy", "off"]
  integration.run_synthetic(
      main=resnet_imagenet_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list + self.get_extra_flags_dict(flags_key))
def test_end_to_end_tpu(self):
  """Test Keras model with TPU distribution strategy."""
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  flag_list = [
      "-distribution_strategy", "tpu",
      "-data_format", "channels_last",
  ]
  integration.run_synthetic(
      main=resnet_imagenet_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list + self._extra_flags)
def test_end_to_end_no_dist_strat(self):
  """Test Keras model with 1 GPU, no distribution strategy."""
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  flag_list = [
      "-distribution_strategy", "off",
      "-model_dir", "keras_cifar_no_dist_strat",
      "-data_format", "channels_last",
  ]
  integration.run_synthetic(
      main=keras_cifar_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list + self._extra_flags)
def test_end_to_end_tpu_bf16(self, flags_key):
  """Test Keras model with TPU and bfloat16 activation."""
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  flag_list = [
      "-distribution_strategy", "tpu",
      "-data_format", "channels_last",
      "-dtype", "bf16",
  ]
  integration.run_synthetic(
      main=resnet_imagenet_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list + self._extra_flags_dict[flags_key])
def test_end_to_end_2_gpu(self, flags_key):
  """Test Keras model with 2 GPUs."""
  # Eager mode must be switched on before any other TF API is touched.
  session_config = keras_utils.get_config_proto_v1()
  tf.compat.v1.enable_eager_execution(config=session_config)
  available_gpus = context.num_gpus()
  if available_gpus < 2:
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available"
        .format(2, available_gpus))
  flag_list = [
      "-num_gpus", "2",
      "-distribution_strategy", "mirrored",
  ]
  integration.run_synthetic(
      main=resnet_imagenet_main.run,
      tmp_root=self.get_temp_dir(),
      extra_flags=flag_list + self.get_extra_flags_dict(flags_key))