Example #1
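These examples are excerpts from PyTorch Lightning's DeepSpeed strategy test suite; the listing drops the original imports and pytest markers (GPU/DeepSpeed gating, parametrization). The sketch below collects the imports the snippets appear to rely on. Exact module paths vary across Lightning versions, and BoringModel/RandomDataset plus the ModelParallel*/Classif* helpers live in Lightning's own test helpers rather than the public package, so treat this as an assumption rather than a verbatim header.

import contextlib
import json
import logging
import os
from typing import Any, Optional
from unittest import mock

import deepspeed
import pytest
import torch
from torch import Tensor
from torch.optim import Optimizer
from torch.utils.data import DataLoader

from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DeepSpeedStrategy
from pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict
from pytorch_lightning.utilities.exceptions import MisconfigurationException

# BoringModel, RandomDataset, ClassifDataModule and the ModelParallel* models come from
# Lightning's test helpers; DeepSpeedSummary and init_meta_context come from
# pytorch_lightning.utilities modules whose exact locations depend on the installed version.
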
def test_deepspeed_multigpu_stage_3_resume_training(tmpdir):
    """Test to ensure with Stage 3 and single GPU that we can resume training."""
    initial_model = ModelParallelClassificationModel()
    dm = ClassifDataModule()

    ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
    initial_trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        limit_test_batches=2,
        strategy=DeepSpeedStrategy(stage=3),
        gpus=1,
        precision=16,
        callbacks=[ck],
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    initial_trainer.fit(initial_model, datamodule=dm)

    class TestCallback(Callback):
        def on_train_batch_start(
            self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int
        ) -> None:
            original_deepspeed_strategy = initial_trainer.strategy
            current_deepspeed_strategy = trainer.strategy

            assert isinstance(original_deepspeed_strategy, DeepSpeedStrategy)
            assert isinstance(current_deepspeed_strategy, DeepSpeedStrategy)
            # assert optimizer states are loaded correctly
            original_optimizer_dict = original_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()
            current_optimizer_dict = current_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()
            for orig_tensor, current_tensor in zip(
                original_optimizer_dict["fp32_flat_groups"], current_optimizer_dict["fp32_flat_groups"]
            ):
                assert torch.all(orig_tensor.eq(current_tensor))
            # assert model state is loaded correctly
            for current_param, initial_param in zip(pl_module.parameters(), initial_model.parameters()):
                assert torch.equal(current_param.cpu(), initial_param.cpu())
            # assert epoch has correctly been restored
            assert trainer.current_epoch == 1

            # assert lr-scheduler states are loaded correctly
            original_lr_scheduler = initial_trainer.lr_scheduler_configs[0].scheduler
            current_lr_scheduler = trainer.lr_scheduler_configs[0].scheduler
            assert original_lr_scheduler.state_dict() == current_lr_scheduler.state_dict()

    model = ModelParallelClassificationModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        strategy=DeepSpeedStrategy(stage=3),
        gpus=1,
        precision=16,
        callbacks=TestCallback(),
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    trainer.fit(model, datamodule=dm, ckpt_path=ck.best_model_path)
Example #2
def test_deepspeed_multigpu_single_file(tmpdir):
    """Test to ensure that DeepSpeed loads from a single file checkpoint."""
    model = BoringModel()
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model)
    trainer.save_checkpoint(checkpoint_path)

    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(stage=3),
        accelerator="gpu",
        devices=1,
        fast_dev_run=True,
        precision=16,
    )
    strategy = trainer.strategy
    assert isinstance(strategy, DeepSpeedStrategy)
    assert not strategy.load_full_weights
    with pytest.raises(MisconfigurationException,
                       match="DeepSpeed was unable to load the checkpoint."):
        trainer.test(model, ckpt_path=checkpoint_path)

    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),
        accelerator="gpu",
        devices=1,
        fast_dev_run=True,
        precision=16,
    )
    strategy = trainer.strategy
    assert isinstance(strategy, DeepSpeedStrategy)
    assert strategy.load_full_weights
    trainer.test(model, ckpt_path=checkpoint_path)
Example #3
def test_deepspeed_multigpu_stage_3_checkpointing(tmpdir, automatic_optimization, accumulate_grad_batches):
    seed_everything(1)
    if automatic_optimization:
        model = ModelParallelClassificationModel()
    else:
        model = ManualModelParallelClassificationModel()
    dm = ClassifDataModule()
    ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=10,
        strategy=DeepSpeedStrategy(stage=3),
        gpus=2,
        precision=16,
        accumulate_grad_batches=accumulate_grad_batches,
        callbacks=[ck],
    )
    trainer.fit(model, datamodule=dm)

    results = trainer.test(datamodule=dm)
    assert results[0]["test_acc"] > 0.7
    saved_results = trainer.test(ckpt_path=ck.best_model_path, datamodule=dm)
    assert saved_results[0]["test_acc"] > 0.7
    assert saved_results == results

    if automatic_optimization:
        model = ModelParallelClassificationModel()
    else:
        model = ManualModelParallelClassificationModel()
    trainer = Trainer(default_root_dir=tmpdir, gpus=2, strategy=DeepSpeedStrategy(stage=3), precision=16)

    results = trainer.test(model, datamodule=dm, ckpt_path=ck.best_model_path)
    assert results[0]["test_acc"] > 0.7
Example #4
def test_deepspeed_run_configure_optimizers(tmpdir):
    """Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation), whilst using
    configure_optimizers for optimizers and schedulers."""

    class TestCB(Callback):
        def on_train_start(self, trainer, pl_module) -> None:
            from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer

            assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)
            assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)
            assert isinstance(trainer.lr_scheduler_configs[0].scheduler, torch.optim.lr_scheduler.StepLR)
            # check that the lr_scheduler config was preserved
            assert trainer.lr_scheduler_configs[0].name == "Sean"

    class TestModel(BoringModel):
        def configure_optimizers(self):
            [optimizer], [scheduler] = super().configure_optimizers()
            return {"optimizer": optimizer, "lr_scheduler": {"scheduler": scheduler, "name": "Sean"}}

    model = TestModel()
    lr_monitor = LearningRateMonitor()
    trainer = Trainer(
        strategy=DeepSpeedStrategy(),  # disable ZeRO so our optimizers are not wrapped
        default_root_dir=tmpdir,
        gpus=1,
        fast_dev_run=True,
        precision=16,
        callbacks=[TestCB(), lr_monitor],
    )
    trainer.fit(model)

    assert lr_monitor.lrs == {"Sean": [0.1]}

    _assert_save_model_is_equal(model, tmpdir, trainer)
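`_assert_save_model_is_equal` is called throughout these examples but never defined in the excerpts. Below is a minimal sketch of what such a helper could do, modeled on the collate-checkpoint example at the end of this page; the body is an assumption, not the upstream implementation.

def _assert_save_model_is_equal(model, tmpdir, trainer):
    # Every rank writes its shard of the DeepSpeed checkpoint.
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
    trainer.save_checkpoint(checkpoint_path)
    trainer.strategy.barrier()

    if trainer.is_global_zero:
        # Collate the shards into a single fp32 checkpoint and compare parameters.
        # Assumes the collated file is a regular Lightning checkpoint with a "state_dict" entry.
        single_ckpt_path = os.path.join(tmpdir, "single_model.pt")
        convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, single_ckpt_path)
        state_dict = torch.load(single_ckpt_path)["state_dict"]
        for orig_param, saved_param in zip(model.parameters(), state_dict.values()):
            assert torch.allclose(orig_param.detach().cpu().float(), saved_param.float())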
Example #5
def test_deepspeed_auto_batch_size_config_select(mock_deepspeed_distributed, tmpdir, dataset_cls, value):
    """Test to ensure that the batch size is correctly set as expected for deepspeed logging purposes."""

    class TestModel(BoringModel):
        def train_dataloader(self):
            return DataLoader(dataset_cls(32, 64))

    class AssertCallback(Callback):
        def setup(self, trainer, pl_module, stage: Optional[str] = None) -> None:
            assert isinstance(trainer.strategy, DeepSpeedStrategy)
            config = trainer.strategy.config

            # int value overrides auto mode
            expected_value = value if isinstance(value, int) else 1
            if dataset_cls == RandomDataset:
                expected_value = pl_module.train_dataloader().batch_size if value == "auto" else value

            assert config["train_micro_batch_size_per_gpu"] == expected_value
            raise SystemExit

    ck = AssertCallback()
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        callbacks=ck,
        gpus=1,
        strategy=DeepSpeedStrategy(logging_batch_size_per_gpu=value, zero_optimization=False),
    )
    with pytest.raises(SystemExit):
        trainer.fit(model)
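The `dataset_cls`, `value` and `mock_deepspeed_distributed` arguments above are supplied by fixtures and a `@pytest.mark.parametrize` decorator that this listing drops. A plausible shape for that decorator is sketched below; `RandomIterableDataset` and the concrete values are illustrative guesses implied by the `dataset_cls == RandomDataset` branch, not the original parametrization.

# Hypothetical reconstruction of the omitted parametrization (values are illustrative).
@pytest.mark.parametrize(
    ("dataset_cls", "value"),
    [
        (RandomDataset, "auto"),          # map-style dataset: batch size read from the DataLoader
        (RandomDataset, 10),              # explicit int overrides "auto"
        (RandomIterableDataset, "auto"),  # no readable batch size: falls back to 1
        (RandomIterableDataset, 10),
    ],
)
def test_deepspeed_auto_batch_size_config_select(mock_deepspeed_distributed, tmpdir, dataset_cls, value):
    ...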
Example #6
def test_deepspeed_with_invalid_config_path(tmpdir):
    """Test to ensure if we pass an invalid config path we throw an exception."""

    with pytest.raises(
        MisconfigurationException, match="You passed in a path to a DeepSpeed config but the path does not exist"
    ):
        DeepSpeedStrategy(config="invalid_path.json")
Example #7
def test_deepspeed_multigpu_partial_partition_parameters(tmpdir):
    """Test to ensure that a module that defines a layer inside the ``__init__`` and ``configure_sharded_model``
    correctly converts all parameters to float16 when ``precision=16`` and runs successfully."""
    class TestModel(ModelParallelBoringModel):
        def __init__(self):
            super().__init__()
            self.layer_2 = torch.nn.Linear(32, 32)

        def configure_sharded_model(self) -> None:
            self.layer = torch.nn.Linear(32, 2)

        def forward(self, x):
            x = self.layer_2(x)
            return self.layer(x)

        def on_train_epoch_start(self) -> None:
            assert all([x.dtype == torch.float16 for x in self.parameters()])

    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(stage=3),
        accelerator="gpu",
        devices=1,
        fast_dev_run=True,
        precision=16,
    )
    trainer.fit(model)
Example #8
def test_deepspeed_summary(tmpdir):
    """Test to ensure that the summary contains the correct values when stage 3 is enabled and that the trainer
    enables the `DeepSpeedSummary` when DeepSpeed is used."""

    model = BoringModel()
    total_parameters = sum(x.numel() for x in model.parameters())

    class TestCallback(Callback):
        def on_fit_start(self, trainer: "pl.Trainer",
                         pl_module: "pl.LightningModule") -> None:
            model_summary = DeepSpeedSummary(pl_module, max_depth=1)
            assert model_summary.total_parameters == total_parameters
            assert model_summary.trainable_parameters == total_parameters

            # check the additional params per device
            summary_data = model_summary._get_summary_data()
            params_per_device = summary_data[-1][-1]
            assert int(
                params_per_device[0]) == (model_summary.total_parameters // 2)

    trainer = Trainer(
        strategy=DeepSpeedStrategy(stage=3),
        default_root_dir=tmpdir,
        accelerator="gpu",
        fast_dev_run=True,
        devices=2,
        precision=16,
        enable_model_summary=True,
        callbacks=[TestCallback()],
    )

    trainer.fit(model)
Example #9
def test_deepspeed_config(tmpdir, deepspeed_zero_config):
    """Test to ensure deepspeed works correctly when passed a DeepSpeed config object including
    optimizers/schedulers and saves the model weights to load correctly."""
    class TestCB(Callback):
        def on_train_start(self, trainer, pl_module) -> None:
            from deepspeed.runtime.lr_schedules import WarmupLR
            from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer

            assert isinstance(trainer.optimizers[0],
                              FP16_DeepSpeedZeroOptimizer)
            assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)
            assert isinstance(trainer.lr_scheduler_configs[0].scheduler,
                              WarmupLR)

    model = BoringModel()
    trainer = Trainer(
        strategy=DeepSpeedStrategy(config=deepspeed_zero_config),
        default_root_dir=tmpdir,
        gpus=1,
        fast_dev_run=True,
        precision=16,
        callbacks=[TestCB()],
    )

    trainer.fit(model)
    trainer.test(model)
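The `deepspeed_zero_config` fixture passed to `DeepSpeedStrategy(config=...)` is not shown in these excerpts. Judging from what the callback asserts (an SGD optimizer wrapped in FP16_DeepSpeedZeroOptimizer and a WarmupLR scheduler), a fixture of roughly the following shape would satisfy it; all values are illustrative assumptions, not the upstream fixture.

@pytest.fixture
def deepspeed_zero_config():
    # Illustrative only: a ZeRO stage-2 config that defines the optimizer and
    # scheduler on the DeepSpeed side, as the assertions above expect.
    return {
        "train_micro_batch_size_per_gpu": 1,
        "fp16": {"enabled": True},
        "zero_optimization": {"stage": 2},
        "optimizer": {"type": "SGD", "params": {"lr": 3e-5}},
        "scheduler": {
            "type": "WarmupLR",
            "params": {"warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 100},
        },
    }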
Example #10
def test_deepspeed_custom_precision_params(tmpdir):
    """Ensure if we modify the FP16 parameters via the DeepSpeedStrategy, the deepspeed config contains these
    changes."""
    class TestCB(Callback):
        def on_train_start(self, trainer, pl_module) -> None:
            assert trainer.strategy.config["fp16"]["loss_scale"] == 10
            assert trainer.strategy.config["fp16"]["initial_scale_power"] == 10
            assert trainer.strategy.config["fp16"]["loss_scale_window"] == 10
            assert trainer.strategy.config["fp16"]["hysteresis"] == 10
            assert trainer.strategy.config["fp16"]["min_loss_scale"] == 10
            raise SystemExit()

    model = BoringModel()
    ds = DeepSpeedStrategy(loss_scale=10,
                           initial_scale_power=10,
                           loss_scale_window=10,
                           hysteresis=10,
                           min_loss_scale=10)
    trainer = Trainer(default_root_dir=tmpdir,
                      strategy=ds,
                      precision=16,
                      accelerator="gpu",
                      devices=1,
                      callbacks=[TestCB()])
    with pytest.raises(SystemExit):
        trainer.fit(model)
Example #11
def test_deepspeed_config(tmpdir, deepspeed_zero_config):
    """Test to ensure deepspeed works correctly when passed a DeepSpeed config object including
    optimizers/schedulers and saves the model weights to load correctly."""
    class TestCB(Callback):
        def on_train_start(self, trainer, pl_module) -> None:
            from deepspeed.runtime.lr_schedules import WarmupLR
            from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer

            assert isinstance(trainer.optimizers[0],
                              FP16_DeepSpeedZeroOptimizer)
            assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)
            assert isinstance(trainer.lr_scheduler_configs[0].scheduler,
                              WarmupLR)
            assert trainer.lr_scheduler_configs[0].interval == "step"
            assert trainer.lr_scheduler_configs[0].opt_idx == 0

    model = BoringModel()
    lr_monitor = LearningRateMonitor()
    trainer = Trainer(
        strategy=DeepSpeedStrategy(config=deepspeed_zero_config),
        default_root_dir=tmpdir,
        gpus=1,
        log_every_n_steps=1,
        limit_train_batches=4,
        limit_val_batches=4,
        limit_test_batches=4,
        max_epochs=2,
        precision=16,
        callbacks=[TestCB(), lr_monitor],
    )

    trainer.fit(model)
    trainer.test(model)
    assert list(lr_monitor.lrs) == ["lr-SGD"]
    assert len(set(lr_monitor.lrs["lr-SGD"])) == 8
Example #12
def test_deepspeed_setup_train_dataloader(tmpdir):
    """Test DeepSpeed works when setup is required to call in the DataModule."""
    class TestSetupIsCalledDataModule(LightningDataModule):
        def __init__(self):
            super().__init__()
            self._setup = False

        def setup(self, stage: Optional[str] = None) -> None:
            self._setup = True

        def train_dataloader(self):
            assert self._setup
            return DataLoader(RandomDataset(32, 64), batch_size=2)

        def val_dataloader(self):
            assert self._setup
            return DataLoader(RandomDataset(32, 64), batch_size=2)

        def test_dataloader(self):
            assert self._setup
            return DataLoader(RandomDataset(32, 64), batch_size=2)

    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(logging_level=logging.INFO),
        gpus=1,
        fast_dev_run=True,
    )
    dm = TestSetupIsCalledDataModule()
    with mock.patch("deepspeed.utils.logging.logger.warning",
                    autospec=True) as mock_object:
        trainer.fit(model, datamodule=dm)
    assert any("Tried to infer the batch size" in str(arg)
               for arg in mock_object.call_args_list)
Example #13
def test_deepspeed_multigpu_test(tmpdir):
    """Test to ensure we can use DeepSpeed with just test using ZeRO Stage 3."""
    model = ModelParallelBoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
    )
    trainer.test(model)
Example #14
def test_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer):
    """Test to ensure with Stage 2 and multiple GPUs, accumulated grad batches works."""
    seed_everything(42)

    class VerificationCallback(Callback):
        def __init__(self):
            self.on_train_batch_start_called = False

        def on_train_batch_start(self, trainer, pl_module: LightningModule, batch: Any, batch_idx: int) -> None:
            deepspeed_engine = trainer.strategy.model
            assert trainer.global_step == deepspeed_engine.global_steps
            self.on_train_batch_start_called = True

    model = ModelParallelClassificationModel()
    dm = ClassifDataModule()
    verification_callback = VerificationCallback()
    trainer = Trainer(
        default_root_dir=tmpdir,
        enable_progress_bar=False,
        # TODO: this test fails with max_epochs >1 as there are leftover batches per epoch.
        # there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it.
        # we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch
        max_epochs=1,
        strategy=DeepSpeedStrategy(stage=2, offload_optimizer=offload_optimizer),
        gpus=2,
        limit_train_batches=5,
        limit_val_batches=2,
        precision=16,
        accumulate_grad_batches=2,
        callbacks=[verification_callback],
    )
    assert trainer.limit_train_batches % trainer.accumulate_grad_batches != 0, "leftover batches should be tested"
    trainer.fit(model, datamodule=dm)
    assert verification_callback.on_train_batch_start_called
Example #15
def test_deepspeed_custom_activation_checkpointing_params_forwarded(tmpdir):
    """Ensure if we modify the activation checkpointing parameters, we pass these to
    deepspeed.checkpointing.configure correctly."""
    ds = DeepSpeedStrategy(
        partition_activations=True,
        cpu_checkpointing=True,
        contiguous_memory_optimization=True,
        synchronize_checkpoint_boundary=True,
    )

    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        enable_progress_bar=False,
        fast_dev_run=1,
        strategy=ds,
        precision=16,
        accelerator="gpu",
        devices=1,
    )
    with mock.patch("deepspeed.checkpointing.configure",
                    wraps=deepspeed.checkpointing.configure
                    ) as deepspeed_checkpointing_configure:
        trainer.fit(model)

    deepspeed_checkpointing_configure.assert_called_with(
        mpu_=None,
        partition_activations=True,
        contiguous_checkpointing=True,
        checkpoint_in_cpu=True,
        profile=None)
Example #16
def test_deepspeed_multigpu_stage_3_warns_resume_training(tmpdir):
    """Test to ensure with Stage 3 and multiple GPUs that we can resume from training, throwing a warning that the
    optimizer state and scheduler states cannot be restored."""
    dm = ClassifDataModule()
    model = BoringModel()
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model)
    trainer.save_checkpoint(checkpoint_path)

    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),
        accelerator="gpu",
        devices=1,
        precision=16,
    )
    with pytest.warns(
            UserWarning,
            match=
            "A single checkpoint file has been given. This means optimizer states cannot be restored. "
            "If you'd like to restore these states, you must "
            "provide a path to the originally saved DeepSpeed checkpoint.",
    ):
        trainer.fit(model, datamodule=dm, ckpt_path=checkpoint_path)
Example #17
def test_deepspeed_strategy_env_variables(mock_deepspeed_distributed, tmpdir,
                                          platform):
    """Test to ensure that we setup distributed communication using correctly.

    When using windows, ranks environment variables should not be set, and deepspeed should handle this.
    """
    trainer = Trainer(default_root_dir=tmpdir,
                      strategy=DeepSpeedStrategy(stage=3))
    strategy = trainer.strategy
    assert isinstance(strategy, DeepSpeedStrategy)
    with mock.patch("platform.system", return_value=platform) as mock_platform:
        strategy._init_deepspeed_distributed()
    mock_deepspeed_distributed.assert_called()
    mock_platform.assert_called()
    if platform == "Windows":
        # assert no env variables have been set within the DeepSpeedStrategy
        assert all(k not in os.environ
                   for k in ("MASTER_PORT", "MASTER_ADDR", "RANK",
                             "WORLD_SIZE", "LOCAL_RANK"))
    else:
        assert os.environ["MASTER_ADDR"] == str(
            trainer.strategy.cluster_environment.main_address)
        assert os.environ["MASTER_PORT"] == str(
            trainer.strategy.cluster_environment.main_port)
        assert os.environ["RANK"] == str(trainer.strategy.global_rank)
        assert os.environ["WORLD_SIZE"] == str(trainer.strategy.world_size)
        assert os.environ["LOCAL_RANK"] == str(trainer.strategy.local_rank)
Example #18
    def select_strategy(self) -> Strategy:
        if isinstance(self.distributed_backend, Accelerator) and self.distributed_backend.strategy is not None:
            plugin = self.distributed_backend.strategy
        elif self.use_ddp2:
            plugin = DDP2Strategy(parallel_devices=self.parallel_devices, cluster_environment=self.cluster_environment)
        elif self.use_ddp and self.use_deepspeed:
            plugin = DeepSpeedStrategy(
                cluster_environment=self.select_cluster_environment(), parallel_devices=self.parallel_devices
            )
        elif self.use_ddp:
            use_slurm_ddp = self.use_ddp and self._is_slurm_managing_tasks()
            use_torchelastic_ddp = self.use_ddp and TorchElasticEnvironment.detect()
            use_kubeflow_ddp = self.use_ddp and KubeflowEnvironment.detect()
            use_ddp_spawn = self._strategy_type == _StrategyType.DDP_SPAWN
            use_ddp_cpu_spawn = use_ddp_spawn and self.use_cpu
            use_tpu_spawn = self.use_tpu and self._strategy_type == _StrategyType.TPU_SPAWN
            use_ddp_cpu_torch_elastic = use_ddp_cpu_spawn and TorchElasticEnvironment.detect()
            use_ddp_cpu_kubeflow = use_ddp_cpu_spawn and KubeflowEnvironment.detect()
            use_ddp_cpu_slurm = use_ddp_cpu_spawn and self._is_slurm_managing_tasks()
            use_ddp_sharded = self._strategy_type == _StrategyType.DDP_SHARDED
            use_ddp_sharded_spawn = self._strategy_type == _StrategyType.DDP_SHARDED_SPAWN
            use_ddp_fully_sharded = self._strategy_type == _StrategyType.DDP_FULLY_SHARDED

            if use_tpu_spawn:
                ddp_strategy_cls = TPUSpawnStrategy
            elif use_ddp_sharded:
                ddp_strategy_cls = DDPShardedStrategy
            elif use_ddp_sharded_spawn:
                ddp_strategy_cls = DDPSpawnShardedStrategy
            elif (
                use_ddp_cpu_slurm
                or use_slurm_ddp
                or use_ddp_cpu_torch_elastic
                or use_torchelastic_ddp
                or use_kubeflow_ddp
                or use_ddp_cpu_kubeflow
            ):
                ddp_strategy_cls = DDPStrategy
            elif use_ddp_spawn or use_ddp_cpu_spawn:
                ddp_strategy_cls = DDPSpawnStrategy
            elif use_ddp_fully_sharded:
                ddp_strategy_cls = DDPFullyShardedStrategy
            else:
                ddp_strategy_cls = DDPStrategy

            plugin = ddp_strategy_cls(
                parallel_devices=self.parallel_devices, cluster_environment=self.cluster_environment
            )
        elif self.use_dp:
            plugin = DataParallelStrategy(parallel_devices=self.parallel_devices)
        elif self.use_horovod:
            plugin = HorovodStrategy(parallel_devices=self.parallel_devices)
        elif self.use_tpu and isinstance(self.tpu_cores, list):
            plugin = SingleTPUStrategy(self.tpu_id)
        elif self.use_ipu:
            plugin = IPUStrategy(parallel_devices=self.parallel_devices)
        else:
            single_gpu_ordinal = device_parser.determine_root_gpu_device(self.parallel_device_ids)
            plugin = SingleDeviceStrategy(device=single_gpu_ordinal if self.use_gpu else "cpu")
        return plugin
Example #19
def test_deepspeed_with_env_path(tmpdir, monkeypatch, deepspeed_config):
    """Test to ensure if we pass an env variable, we load the config from the path."""
    config_path = os.path.join(tmpdir, "temp.json")
    with open(config_path, "w") as f:
        f.write(json.dumps(deepspeed_config))
    monkeypatch.setenv("PL_DEEPSPEED_CONFIG_PATH", config_path)
    strategy = DeepSpeedStrategy()
    assert strategy.config == deepspeed_config
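The same mechanism works outside of pytest: `PL_DEEPSPEED_CONFIG_PATH` only needs to point at a JSON config on disk before the strategy is constructed (the path below is a placeholder).

os.environ["PL_DEEPSPEED_CONFIG_PATH"] = "/path/to/deepspeed_config.json"  # placeholder path
strategy = DeepSpeedStrategy()  # picks up the config from the environment variable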
Example #20
def test_deepspeed_multigpu_no_schedulers(tmpdir):
    """Test to ensure ZeRO Stage 3 works with a parallel model and no schedulers."""
    model = ModelParallelBoringModelNoSchedulers()
    trainer = Trainer(
        default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
    )
    trainer.fit(model)

    _assert_save_model_is_equal(model, tmpdir, trainer)
Example #21
def test_deepspeed_skip_backward_raises(tmpdir):
    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            return None

    model = TestModel()
    trainer = Trainer(default_root_dir=tmpdir, strategy=DeepSpeedStrategy(), gpus=1, fast_dev_run=True, precision=16)
    with pytest.raises(MisconfigurationException, match="returning `None` .* is not supported"):
        trainer.fit(model)
Example #22
def test_deepspeed_with_meta_device(tmpdir):
    with init_meta_context():
        model = BoringModel()
    assert model.layer.weight.device.type == "meta"
    trainer = Trainer(
        default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
    )
    trainer.fit(model)
    assert model.layer.weight.device.type == "cpu"
Example #23
def test_deepspeed_multigpu_stage_3(tmpdir, deepspeed_config):
    """Test to ensure ZeRO Stage 3 works with a parallel model."""
    model = ModelParallelBoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
    )
    trainer.fit(model)
    trainer.test(model)

    _assert_save_model_is_equal(model, tmpdir, trainer)
Example #24
def test_deepspeed_multigpu(tmpdir):
    """Test to ensure that DeepSpeed with multiple GPUs works and deepspeed distributed is initialized
    correctly."""
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
    )
    with mock.patch("deepspeed.init_distributed", wraps=deepspeed.init_distributed) as mock_deepspeed_distributed:
        trainer.fit(model)
    mock_deepspeed_distributed.assert_called_once()
    trainer.test(model)

    _assert_save_model_is_equal(model, tmpdir, trainer)
Example #25
def test_deepspeed_custom_activation_checkpointing_params(tmpdir):
    """Ensure if we modify the activation checkpointing parameters, the deepspeed config contains these changes."""
    ds = DeepSpeedStrategy(
        partition_activations=True,
        cpu_checkpointing=True,
        contiguous_memory_optimization=True,
        synchronize_checkpoint_boundary=True,
    )
    checkpoint_config = ds.config["activation_checkpointing"]
    assert checkpoint_config["partition_activations"]
    assert checkpoint_config["cpu_checkpointing"]
    assert checkpoint_config["contiguous_memory_optimization"]
    assert checkpoint_config["synchronize_checkpoint_boundary"]
Example #26
def test_deepspeed_stage_3_save_warning(tmpdir):
    """Test to ensure that DeepSpeed Stage 3 gives a warning when saving on rank zero."""
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
    )
    trainer.fit(model)
    checkpoint_path = os.path.join(tmpdir, "model.pt")

    # both ranks need to call save checkpoint, however only rank 0 needs to check the warning
    context_manager = (
        pytest.warns(UserWarning, match="each worker will save a shard of the checkpoint within a directory.")
        if trainer.is_global_zero
        else contextlib.suppress()
    )
    with context_manager:
        trainer.save_checkpoint(checkpoint_path)
Example #27
def test_deepspeed_multigpu_stage_3_manual_optimization(
        tmpdir, deepspeed_config):
    """Test to ensure ZeRO Stage 3 works with a parallel model."""
    model = ModelParallelBoringModelManualOptim()
    model.training_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(stage=3),
        accelerator="gpu",
        devices=2,
        fast_dev_run=True,
        precision=16,
    )
    trainer.fit(model)
    trainer.test(model)

    _assert_save_model_is_equal(model, tmpdir, trainer)
Example #28
def test_warn_deepspeed_ignored(tmpdir):
    class TestModel(BoringModel):
        def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
            return loss.backward()

    model = TestModel()
    trainer = Trainer(
        fast_dev_run=True,
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(),
        gpus=1,
        precision=16,
        track_grad_norm=2,
    )
    from pytorch_lightning.plugins.precision.deepspeed import warning_cache

    with pytest.warns(UserWarning, match="will be ignored since DeepSpeed handles the backward"):
        trainer.fit(model)
    assert any("track_grad_norm=2.0)' but this is not supported" in w for w in warning_cache)
Example #29
def test_deepspeed_multigpu_test_rnn(tmpdir):
    """Test to ensure that turning off explicit partitioning of the entire module for ZeRO Stage 3 works when
    training with certain layers which will crash with explicit partitioning."""
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.rnn = torch.nn.GRU(32, 32)

        def on_train_epoch_start(self) -> None:
            assert all([x.dtype == torch.float16 for x in self.parameters()])

    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(stage=3),
        gpus=1,
        fast_dev_run=True,
        precision=16,
    )
    trainer.fit(model)
Example #30
def test_deepspeed_collate_checkpoint(tmpdir):
    """Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file."""
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=DeepSpeedStrategy(stage=3),
        accelerator="gpu",
        devices=2,
        fast_dev_run=True,
        precision=16,
    )
    trainer.fit(model)
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
    trainer.save_checkpoint(checkpoint_path)
    trainer.strategy.barrier()
    if trainer.is_global_zero:
        # ensure function call works
        output_path = os.path.join(tmpdir, "single_model.pt")
        convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path,
                                                   output_path)
        _assert_checkpoint_equal(model, output_path)
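`_assert_checkpoint_equal` is also not defined in these excerpts; a minimal sketch, assuming the collated file written by `convert_zero_checkpoint_to_fp32_state_dict` is a regular Lightning checkpoint with a `state_dict` entry:

def _assert_checkpoint_equal(model, output_path):
    # Illustrative sketch: compare the collated fp32 checkpoint against the in-memory model.
    assert os.path.exists(output_path)
    state_dict = torch.load(output_path)["state_dict"]
    for orig_param, saved_param in zip(model.parameters(), state_dict.values()):
        assert torch.allclose(orig_param.detach().cpu().float(), saved_param.float())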