"LOCAL_RANK": "1",
        "GROUP_RANK": "1",
        "RANK": "3",
        "WORLD_SIZE": "4",
        "LOCAL_WORLD_SIZE": "2",
        "TORCHELASTIC_RUN_ID": "1",
    }
    environment = TorchElasticEnvironment()
    yield environment, variables, expected
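
A hedged aside (not part of the original snippet): the fixture above relies on TorchElasticEnvironment parsing the torchelastic variables straight from os.environ. A minimal sketch of that expectation, assuming the standard ClusterEnvironment accessors (world_size, global_rank, local_rank, node_rank):

import os
from unittest import mock

from pytorch_lightning.plugins.environments import TorchElasticEnvironment

# trimmed to the rank-related variables from the fixture above
variables = {"WORLD_SIZE": "4", "RANK": "3", "LOCAL_RANK": "1", "GROUP_RANK": "1"}
with mock.patch.dict(os.environ, variables):
    env = TorchElasticEnvironment()
    assert env.world_size() == 4   # WORLD_SIZE
    assert env.global_rank() == 3  # RANK
    assert env.local_rank() == 1   # LOCAL_RANK
    assert env.node_rank() == 1    # GROUP_RANK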


@pytest.mark.parametrize(
    "strategy_cls",
    [
        DDPStrategy, DDPShardedStrategy,
        pytest.param(DeepSpeedStrategy, marks=RunIf(deepspeed=True))
    ],
)
@mock.patch("pytorch_lightning.accelerators.cuda.CUDAAccelerator.is_available",
            return_value=True)
def test_ranks_available_manual_strategy_selection(mock_gpu_acc_available,
                                                   strategy_cls):
    """Test that the rank information is readily available after Trainer initialization."""
    num_nodes = 2
    for cluster, variables, expected in environment_combinations():
        with mock.patch.dict(os.environ, variables):
            strategy = strategy_cls(parallel_devices=[
                torch.device("cuda", 1),
                torch.device("cuda", 2)
            ],
                                    cluster_environment=cluster)
Example #2
                 reduce_fx="mean")
        self.log("bar_3",
                 batch_idx + self.rank,
                 on_step=False,
                 on_epoch=True,
                 sync_dist=True,
                 reduce_fx="max")
        return super().validation_step(batch, batch_idx)


@pytest.mark.parametrize(
    "devices, accelerator",
    [
        (1, "cpu"),
        (2, "cpu"),
        pytest.param(2, "gpu", marks=RunIf(min_cuda_gpus=2)),
    ],
)
def test_logging_sync_dist_true(tmpdir, devices, accelerator):
    """Test that the sync_dist flag works (it should just return the original value)."""
    fake_result = 1
    model = LoggingSyncDistModel(fake_result)

    use_multiple_devices = devices > 1
    trainer = Trainer(
        max_epochs=1,
        default_root_dir=tmpdir,
        limit_train_batches=3,
        limit_val_batches=3,
        enable_model_summary=False,
        strategy="ddp_spawn" if use_multiple_devices else None,
    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def train_dataloader(self):
        return DataLoader(RandomIndicesDataset(), batch_size=4)

    def val_dataloader(self):
        return DataLoader(RandomIndicesDataset(), batch_size=4)

    def test_dataloader(self):
        return DataLoader(RandomIndicesDataset(), batch_size=4)
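
RandomIndicesDataset is a helper that is not shown in this excerpt. A hypothetical stand-in with the same rough shape (a small map-style dataset of random integer "indices"), only so the dataloader methods above can be exercised in isolation:

import torch
from torch.utils.data import Dataset


class RandomIndicesDataset(Dataset):
    # hypothetical stand-in, not the original test helper
    def __len__(self):
        return 16

    def __getitem__(self, index):
        return torch.randint(0, 10, (1,))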


@pytest.mark.flaky(reruns=3)
@pytest.mark.parametrize("accelerator", [
    pytest.param("gpu", marks=RunIf(min_cuda_gpus=1)),
    pytest.param("mps", marks=RunIf(mps=True))
])
def test_trainer_num_prefetch_batches(tmpdir, accelerator):

    model = RecommenderModel()

    class AssertFetcher(Callback):
        def __init__(self, check_inter_batch):
            self._check_inter_batch = check_inter_batch

        def on_train_epoch_end(self, trainer, lightning_module):
            fetcher = trainer.fit_loop._data_fetcher
            assert isinstance(
                fetcher, InterBatchParallelDataFetcher
                if self._check_inter_batch else DataFetcher)
Example #4
        assert torch.all(self.layer.weight.grad == 0)

        return loss_2

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        return optimizer, optimizer_2
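
The two-optimizer configure_optimizers above is normally paired with manual optimization. A minimal sketch of that pattern using the public LightningModule API (an illustration, not the original ManualOptModel):

import torch
from pytorch_lightning import LightningModule


class ManualTwoOptimizerSketch(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)
        # opt out of automatic optimization so training_step drives both optimizers
        self.automatic_optimization = False

    def training_step(self, batch, batch_idx):
        opt_1, opt_2 = self.optimizers()

        loss_1 = self.layer(batch).sum()
        opt_1.zero_grad()
        self.manual_backward(loss_1)
        opt_1.step()

        loss_2 = self.layer(batch).sum()
        opt_2.zero_grad()
        self.manual_backward(loss_2)
        opt_2.step()

    def configure_optimizers(self):
        return (
            torch.optim.SGD(self.layer.parameters(), lr=0.1),
            torch.optim.SGD(self.layer.parameters(), lr=0.1),
        )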


@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        pytest.param(
            {"accelerator": "gpu", "devices": 1, "precision": 16, "amp_backend": "native"}, marks=RunIf(min_cuda_gpus=1)
        ),
        pytest.param(
            {"accelerator": "gpu", "devices": 1, "precision": 16, "amp_backend": "apex", "amp_level": "O2"},
            marks=RunIf(min_cuda_gpus=1, amp_apex=True),
        ),
    ],
)
def test_multiple_optimizers_manual_no_return(tmpdir, kwargs):
    apex_optimizer_patches = []
    apex_optimizer_steps = []

    class TestModel(ManualOptModel):
        def training_step(self, batch, batch_idx):
            # avoid returning a value
            super().training_step(batch, batch_idx)
    },
)
@mock.patch("pytorch_lightning.utilities.device_parser.is_cuda_available",
            return_value=True)
@mock.patch("pytorch_lightning.utilities.device_parser.num_cuda_devices",
            return_value=2)
@pytest.mark.parametrize("strategy,devices", [("ddp", 2), ("ddp_spawn", 2)])
@pytest.mark.parametrize(
    "amp,custom_plugin,plugin_cls",
    [
        ("native", False, NativeMixedPrecisionPlugin),
        ("native", True, MyNativeAMP),
        pytest.param("apex",
                     False,
                     ApexMixedPrecisionPlugin,
                     marks=RunIf(amp_apex=True)),
        pytest.param("apex", True, MyApexPlugin, marks=RunIf(amp_apex=True)),
    ],
)
def test_amp_apex_ddp(mocked_device_count, mocked_is_available, strategy,
                      devices, amp, custom_plugin, plugin_cls):
    plugin = None
    if custom_plugin:
        plugin = plugin_cls(16, "cpu") if amp == "native" else plugin_cls()
    trainer = Trainer(
        fast_dev_run=True,
        precision=16,
        amp_backend=amp,
        accelerator="gpu",
        devices=devices,
        strategy=strategy,
        percent_diff <= max_percent_speed_diff
    ), f"Custom DDP was too slow compared to regular DDP, Custom Plugin Time: {custom_model_time}, DDP Time: {ddp_time}"

    if use_cuda:
        # Assert CUDA memory parity
        assert max_memory_custom <= max_memory_ddp, (
            "Custom plugin used too much memory compared to DDP, "
            f"Custom Mem: {max_memory_custom}, DDP Mem: {max_memory_ddp}")
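
The speed and memory parity assertions above compare numbers collected earlier in the (truncated) test. A hedged sketch of how such wall-time and peak-memory figures are typically gathered with plain PyTorch APIs:

import time

import torch


def measure_time_and_peak_memory(fn, use_cuda):
    """Run ``fn`` once and return (elapsed_seconds, peak_cuda_memory_bytes)."""
    if use_cuda:
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.synchronize()
    start = time.perf_counter()
    fn()
    if use_cuda:
        torch.cuda.synchronize()
    elapsed = time.perf_counter() - start
    peak_memory = torch.cuda.max_memory_allocated() if use_cuda else 0
    return elapsed, peak_memory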


@RunIf(skip_windows=True, fairscale=True)
@pytest.mark.parametrize(
    "kwargs",
    [
        pytest.param(dict(gpus=1, model_cls=SeedTrainLoaderModel),
                     marks=RunIf(min_cuda_gpus=1)),
        pytest.param(dict(gpus=1, precision=16,
                          model_cls=SeedTrainLoaderModel),
                     marks=RunIf(min_cuda_gpus=1, amp_native=True)),
        pytest.param(dict(gpus=2, model_cls=SeedTrainLoaderModel),
                     marks=RunIf(min_cuda_gpus=2)),
        pytest.param(dict(gpus=2, precision=16,
                          model_cls=SeedTrainLoaderModel),
                     marks=RunIf(min_cuda_gpus=2, amp_native=True)),
        pytest.param(
            dict(gpus=2, model_cls=SeedTrainLoaderMultipleOptimizersModel),
            marks=[
                RunIf(min_cuda_gpus=2),
                pytest.mark.skip(
                    reason=
                    "TODO: Current issue with multiple optimizers and FairScale."
            old_cli.instantiate_class(
                tuple(), {"class_path": "pytorch_lightning.Trainer"}), Trainer)


def test_profiler_deprecation_warning():
    assert "Profiler` is deprecated in v1.7" in Profiler.__doc__


@pytest.mark.parametrize(
    "cls",
    [
        AdvancedProfiler,
        PassThroughProfiler,
        PyTorchProfiler,
        SimpleProfiler,
        pytest.param(XLAProfiler, marks=RunIf(tpu=True)),
    ],
)
def test_profiler_classes_deprecated_warning(cls):
    with pytest.deprecated_call(
            match=
            f"profiler.{cls.__name__}` is deprecated in v1.7 and will be removed in v1.9."
            f" Use .*profilers.{cls.__name__}` class instead."):
        cls()


@pytest.mark.skipif(not _KINETO_AVAILABLE,
                    reason="Requires PyTorch Profiler Kineto")
def test_pytorch_profiler_schedule_wrapper_deprecation_warning():
    with pytest.deprecated_call(
            match=
Example #8
    trainer = Trainer(strategy="ddp_sharded_spawn",
                      accelerator="cpu",
                      devices=2,
                      fast_dev_run=True)

    trainer.fit(model, ckpt_path=checkpoint_path)


@RunIf(skip_windows=True, standalone=True, fairscale=True)
@pytest.mark.parametrize(
    "trainer_kwargs",
    (
        dict(accelerator="cpu", devices=2),
        pytest.param(dict(accelerator="gpu", devices=2),
                     marks=RunIf(min_cuda_gpus=2)),
    ),
)
def test_ddp_sharded_strategy_test_multigpu(trainer_kwargs):
    """Test to ensure we can use validate and test without fit."""
    model = BoringModel()
    trainer = Trainer(
        strategy="ddp_sharded_spawn",
        fast_dev_run=True,
        enable_progress_bar=False,
        enable_model_summary=False,
        **trainer_kwargs,
    )

    trainer.validate(model)
    trainer.test(model)
Example #9
else:

    class DictConfSubClassBoringModel:
        ...


@pytest.mark.parametrize(
    "cls",
    [
        CustomBoringModel,
        SubClassBoringModel,
        NonSavingSubClassBoringModel,
        SubSubClassBoringModel,
        AggSubClassBoringModel,
        UnconventionalArgsBoringModel,
        pytest.param(DictConfSubClassBoringModel, marks=RunIf(omegaconf=True)),
    ],
)
def test_collect_init_arguments(tmpdir, cls):
    """Test that the model automatically saves the arguments passed into the constructor."""
    extra_args = {}
    if cls is AggSubClassBoringModel:
        extra_args.update(my_loss=torch.nn.CosineEmbeddingLoss())
    elif cls is DictConfSubClassBoringModel:
        extra_args.update(dict_conf=OmegaConf.create(dict(
            my_param="anything")))

    model = cls(**extra_args)
    assert model.hparams.batch_size == 64
    model = cls(batch_size=179, **extra_args)
    assert model.hparams.batch_size == 179
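
The assertions above depend on the constructor arguments being captured into self.hparams. A minimal sketch of the mechanism via save_hyperparameters() (a stand-in model, not the CustomBoringModel family used by the test):

import torch
from pytorch_lightning import LightningModule


class HParamsSketchModel(LightningModule):
    def __init__(self, batch_size=64):
        super().__init__()
        # records every constructor argument under self.hparams
        self.save_hyperparameters()
        self.layer = torch.nn.Linear(32, 2)


model = HParamsSketchModel()
assert model.hparams.batch_size == 64
model = HParamsSketchModel(batch_size=179)
assert model.hparams.batch_size == 179
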
Example #10
    assert _count == 6
    assert _has_fastforward_sampler == use_fault_tolerant

    def _assert_dataset(loader):
        d = loader.dataset
        if use_fault_tolerant:
            assert isinstance(d, CaptureMapDataset)
        else:
            assert isinstance(d, CustomDataset)

    apply_to_collection(dataloader.loaders, DataLoader, _assert_dataset)
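
apply_to_collection, used just above, walks an arbitrarily nested collection and applies a function to every element of the given type, leaving everything else untouched. A small standalone illustration:

from pytorch_lightning.utilities.apply_func import apply_to_collection

data = {"a": [1, 2, 3], "b": (4.0, 5)}
# double every int; the float 4.0 does not match the dtype and is left as-is
doubled = apply_to_collection(data, int, lambda x: x * 2)
assert doubled == {"a": [2, 4, 6], "b": (4.0, 10)}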


@pytest.mark.parametrize(
    "accelerator",
    ["cpu", pytest.param("gpu", marks=RunIf(min_cuda_gpus=2))])
@pytest.mark.parametrize("replace_sampler_ddp", [False, True])
def test_combined_data_loader_with_max_size_cycle_and_ddp(
        accelerator, replace_sampler_ddp):
    """Make sure the distributed sampler is properly injected into the dataloaders when using CombinedLoader
    with DDP and `max_size_cycle` mode."""
    trainer = Trainer(strategy="ddp",
                      accelerator=accelerator,
                      devices=2,
                      replace_sampler_ddp=replace_sampler_ddp)

    dataloader = CombinedLoader(
        {
            "a": DataLoader(RandomDataset(32, 8), batch_size=1),
            "b": DataLoader(RandomDataset(32, 8), batch_size=1)
        }, mode="max_size_cycle")
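
As a conceptual aside (pure Python, not the CombinedLoader implementation): in `max_size_cycle` mode the shorter loaders are cycled until the longest one is exhausted, roughly like this:

from itertools import cycle

a = [1, 2, 3, 4]  # the longest "loader" determines the combined length
b = ["x", "y"]    # the shorter one is cycled

combined = [{"a": xa, "b": xb} for xa, xb in zip(a, cycle(b))]
assert combined == [
    {"a": 1, "b": "x"},
    {"a": 2, "b": "y"},
    {"a": 3, "b": "x"},
    {"a": 4, "b": "y"},
]
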
 (
     "ddp_spawn_find_unused_parameters_false",
     DDPSpawnStrategy,
     {
         "find_unused_parameters": False,
         "start_method": "spawn"
     },
 ),
 pytest.param(
     "ddp_fork_find_unused_parameters_false",
     DDPSpawnStrategy,
     {
         "find_unused_parameters": False,
         "start_method": "fork"
     },
     marks=RunIf(skip_windows=True),
 ),
 pytest.param(
     "ddp_notebook_find_unused_parameters_false",
     DDPSpawnStrategy,
     {
         "find_unused_parameters": False,
         "start_method": "fork"
     },
     marks=RunIf(skip_windows=True),
 ),
 (
     "ddp_sharded_spawn_find_unused_parameters_false",
     DDPSpawnShardedStrategy,
     {
         "find_unused_parameters": False
Example #12
        "epoch_loop.batch_progress"]

    val_dl_progress = "epoch_loop.val_loop.dataloader_progress"
    expected[val_dl_progress]["total"]["ready"] += 1
    assert state_dict_after_restart[val_dl_progress] == expected[
        val_dl_progress]

    expected[val_batch_progress]["total"]["ready"] += 1
    expected[val_batch_progress]["total"]["started"] += 1
    assert state_dict_after_restart[val_batch_progress] == expected[
        val_batch_progress]


@pytest.mark.parametrize("should_fail", [False, True])
@pytest.mark.parametrize("persistent_workers",
                         [pytest.param(False, marks=RunIf(slow=True)), True])
def test_workers_are_shutdown(tmpdir, should_fail, persistent_workers):
    # `num_workers == 1` uses `_MultiProcessingDataLoaderIter`
    # `persistent_workers` makes sure `self._iterator` gets set on the `DataLoader` instance

    class _TestMultiProcessingDataLoaderIter(_MultiProcessingDataLoaderIter):
        def __init__(self, *args, dataloader, **kwargs):
            super().__init__(*args, **kwargs)
            self.dataloader = dataloader

        def _shutdown_workers(self):
            self.dataloader.count_shutdown_workers += 1
            super()._shutdown_workers()

    class TestDataLoader(DataLoader):
        def __init__(self, *args, **kwargs):
    if backend == "nccl":
        device = torch.device("cuda", rank)
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    # initialize the process group
    torch.distributed.init_process_group(backend, rank=rank, world_size=world_size)
    tensor = torch.ones(rank + 1, 2 - rank, device=device)
    result = gather_all_tensors(tensor)
    assert len(result) == world_size
    for idx in range(world_size):
        val = result[idx]
        assert val.shape == (idx + 1, 2 - idx)
        assert (val == torch.ones_like(val)).all()


@RunIf(min_torch="1.10", skip_windows=True)
@pytest.mark.parametrize(
    "process",
    [
        _test_all_gather_uneven_tensors_multidim,
        _test_all_gather_uneven_tensors,
    ],
)
@pytest.mark.parametrize("backend", [pytest.param("nccl", marks=RunIf(min_cuda_gpus=2)), "gloo"])
def test_gather_all_tensors(backend, process):
    tutils.set_random_main_port()
    mp.spawn(process, args=(2, backend), nprocs=2)
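
gather_all_tensors has to cope with per-rank tensors of different shapes, which is what the asserts above verify. A hedged, simplified sketch (first dimension only) of the usual pad/all_gather/slice approach, assuming an already initialized process group:

import torch
import torch.distributed as dist


def gather_uneven_first_dim(tensor):
    # sketch only: gather tensors whose first dimension differs across ranks
    world_size = dist.get_world_size()

    # 1) exchange the per-rank first-dimension sizes
    local_size = torch.tensor([tensor.shape[0]], device=tensor.device)
    sizes = [torch.zeros_like(local_size) for _ in range(world_size)]
    dist.all_gather(sizes, local_size)

    # 2) pad to the maximum size so all_gather sees identical shapes everywhere
    max_size = int(max(s.item() for s in sizes))
    padded = torch.zeros(max_size, *tensor.shape[1:], dtype=tensor.dtype, device=tensor.device)
    padded[: tensor.shape[0]] = tensor
    gathered = [torch.zeros_like(padded) for _ in range(world_size)]
    dist.all_gather(gathered, padded)

    # 3) slice every gathered tensor back to its true size
    return [g[: int(s.item())] for g, s in zip(gathered, sizes)]
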
    trainer.predict(model, dataloaders=dataloader, return_predictions=False)
    assert cb.write_on_batch_end.call_count == 4
    assert cb.write_on_epoch_end.call_count == 0

    DummyPredictionWriter.write_on_batch_end.reset_mock()
    DummyPredictionWriter.write_on_epoch_end.reset_mock()

    cb = DummyPredictionWriter("epoch")
    trainer = Trainer(limit_predict_batches=4, callbacks=cb)
    trainer.predict(model, dataloaders=dataloader, return_predictions=False)
    assert cb.write_on_batch_end.call_count == 0
    assert cb.write_on_epoch_end.call_count == 1
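
DummyPredictionWriter is defined elsewhere in the original test module. A hypothetical reconstruction, assuming it is a thin BasePredictionWriter subclass whose write_interval ("batch", "epoch" or "batch_and_epoch") comes from the constructor argument seen above:

from pytorch_lightning.callbacks import BasePredictionWriter


class DummyPredictionWriter(BasePredictionWriter):
    # hypothetical stand-in; the real helper is not part of this excerpt
    def __init__(self, write_interval):
        super().__init__(write_interval)

    def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, batch,
                           batch_idx, dataloader_idx):
        pass

    def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
        pass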


@pytest.mark.parametrize("num_workers",
                         [0, pytest.param(2, marks=RunIf(slow=True))])
def test_prediction_writer_batch_indices(num_workers):
    DummyPredictionWriter.write_on_batch_end = Mock()
    DummyPredictionWriter.write_on_epoch_end = Mock()

    dataloader = DataLoader(RandomDataset(32, 64),
                            batch_size=4,
                            num_workers=num_workers)
    model = BoringModel()
    writer = DummyPredictionWriter("batch_and_epoch")
    trainer = Trainer(limit_predict_batches=4, callbacks=writer)
    trainer.predict(model, dataloaders=dataloader)

    writer.write_on_batch_end.assert_has_calls([
        call(trainer, model, ANY, [0, 1, 2, 3], ANY, 0, 0),
        call(trainer, model, ANY, [4, 5, 6, 7], ANY, 1, 0),
@RunIf(skip_windows=True)
def test_ipython_compatible_strategy_ddp_fork(monkeypatch):
    monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
    trainer = Trainer(strategy="ddp_fork", accelerator="cpu")
    assert trainer.strategy.launcher.is_interactive_compatible


@pytest.mark.parametrize(
    ["strategy", "strategy_class"],
    [
        ("ddp", DDPStrategy),
        ("ddp_spawn", DDPSpawnStrategy),
        ("ddp_sharded", DDPShardedStrategy),
        ("ddp_sharded_spawn", DDPSpawnShardedStrategy),
        pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)),
    ],
)
@pytest.mark.parametrize("devices", [1, 2])
@mock.patch("pytorch_lightning.utilities.device_parser.is_cuda_available", return_value=True)
@mock.patch("pytorch_lightning.utilities.device_parser.num_cuda_devices", return_value=2)
def test_accelerator_choice_multi_node_gpu(
    mock_device_count, mock_is_available, tmpdir, strategy, strategy_class, devices
):
    trainer = Trainer(default_root_dir=tmpdir, num_nodes=2, accelerator="gpu", strategy=strategy, devices=devices)
    assert isinstance(trainer.strategy, strategy_class)


@mock.patch("pytorch_lightning.accelerators.cuda.device_parser.num_cuda_devices", return_value=0)
def test_accelerator_cpu(_):
    trainer = Trainer(accelerator="cpu")
Example #16
                                  method="trace")
    assert isinstance(script, torch.jit.ScriptModule)

    model.eval()
    with torch.no_grad():
        model_output = model(example_inputs)

    script_output = script(example_inputs)
    assert torch.allclose(script_output, model_output)


@pytest.mark.parametrize(
    "device_str",
    [
        "cpu",
        pytest.param("cuda:0", marks=RunIf(min_cuda_gpus=1)),
        pytest.param("mps:0", marks=RunIf(mps=True)),
    ],
)
def test_torchscript_device(device_str):
    """Test that scripted module is on the correct device."""
    device = torch.device(device_str)
    model = BoringModel().to(device)
    model.example_input_array = torch.randn(5, 32)

    script = model.to_torchscript()
    assert next(script.parameters()).device == device
    script_output = script(model.example_input_array.to(device))
    assert script_output.device == device
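
Related to the truncated trace-export fragment near the top of this example: a hedged sketch of exporting via tracing with to_torchscript, using a small stand-in module that defines example_input_array so no explicit example inputs are needed:

import torch
from pytorch_lightning import LightningModule


class TraceableModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)
        self.example_input_array = torch.randn(5, 32)

    def forward(self, x):
        return self.layer(x)


model = TraceableModel()
# method defaults to "script"; "trace" falls back to example_input_array for inputs
traced = model.to_torchscript(method="trace")
assert isinstance(traced, torch.jit.ScriptModule)
with torch.no_grad():
    assert torch.allclose(traced(model.example_input_array), model(model.example_input_array))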

        trainer.request_dataloader(stage=RunningStage.TRAINING)


def test_v_1_8_0_deprecated_device_stats_monitor_prefix_metric_keys():
    from pytorch_lightning.callbacks.device_stats_monitor import prefix_metric_keys

    with pytest.deprecated_call(match="in v1.6 and will be removed in v1.8"):
        prefix_metric_keys({"foo": 1.0}, "bar")


@pytest.mark.parametrize(
    "cls",
    [
        DDPPlugin,
        DDPSpawnPlugin,
        pytest.param(DeepSpeedPlugin, marks=RunIf(deepspeed=True)),
        DataParallelPlugin,
        DDPFullyShardedPlugin,
        pytest.param(IPUPlugin, marks=RunIf(ipu=True)),
        DDPShardedPlugin,
        DDPSpawnShardedPlugin,
        TPUSpawnPlugin,
    ],
)
def test_v1_8_0_deprecated_training_type_plugin_classes(cls):
    old_name = cls.__name__
    new_name = old_name.replace("Plugin", "Strategy")
    with pytest.deprecated_call(
            match=
            f"{old_name}` is deprecated in v1.6 and will be removed in v1.8. Use .*{new_name}` instead."
    ):
        if not self.early_stop_on_train:
            return
        self._epoch_end()

    def validation_epoch_end(self, outputs):
        if self.early_stop_on_train:
            return
        self._epoch_end()

    def on_train_end(self) -> None:
        assert self.trainer.current_epoch - 1 == self.expected_end_epoch, "Early Stopping Failed"


_ES_CHECK = dict(check_on_train_epoch_end=True)
_ES_CHECK_P3 = dict(patience=3, check_on_train_epoch_end=True)
_SPAWN_MARK = dict(marks=RunIf(skip_windows=True))


@pytest.mark.parametrize(
    "callbacks, expected_stop_epoch, check_on_train_epoch_end, strategy, devices",
    [
        ([EarlyStopping("abc"),
          EarlyStopping("cba", patience=3)], 3, False, None, 1),
        ([EarlyStopping("cba", patience=3),
          EarlyStopping("abc")], 3, False, None, 1),
        pytest.param([EarlyStopping("abc"),
                      EarlyStopping("cba", patience=3)], 3, False, "ddp_spawn",
                     2, **_SPAWN_MARK),
        pytest.param([EarlyStopping("cba", patience=3),
                      EarlyStopping("abc")], 3, False, "ddp_spawn", 2,
                     **_SPAWN_MARK),
    assert lite_module.method() == 2
    assert lite_module.forward.__self__.__class__ == _LiteModule

    with pytest.raises(AttributeError):
        _ = lite_module.not_exists


@pytest.mark.parametrize(
    "precision, input_type, expected_type, accelerator, device_str",
    [
        pytest.param(32,
                     torch.float16,
                     torch.float32,
                     "gpu",
                     "cuda:0",
                     marks=RunIf(min_cuda_gpus=1)),
        pytest.param(32,
                     torch.float32,
                     torch.float32,
                     "gpu",
                     "cuda:0",
                     marks=RunIf(min_cuda_gpus=1)),
        pytest.param(32,
                     torch.float64,
                     torch.float32,
                     "gpu",
                     "cuda:0",
                     marks=RunIf(min_cuda_gpus=1)),
        pytest.param(32,
                     torch.int,
                     torch.int,
Example #20
import pytest
import torch

from pytorch_lightning.overrides.fairscale import _FAIRSCALE_AVAILABLE
from pytorch_lightning.plugins import ShardedNativeMixedPrecisionPlugin
from tests_pytorch.helpers.runif import RunIf

ShardedGradScaler = None
if _FAIRSCALE_AVAILABLE:
    from fairscale.optim.grad_scaler import ShardedGradScaler


@RunIf(fairscale=True)
@pytest.mark.parametrize(
    "precision,scaler,expected",
    [
        (16, torch.cuda.amp.GradScaler(), torch.cuda.amp.GradScaler),
        (16, None, ShardedGradScaler),
        pytest.param("bf16", None, None, marks=RunIf(min_torch="1.10")),
        (32, None, None),
    ],
)
def test_sharded_precision_scaler(precision, scaler, expected):
    plugin = ShardedNativeMixedPrecisionPlugin(precision=precision,
                                               scaler=scaler,
                                               device="cuda")
    if expected:
        assert isinstance(plugin.scaler, expected)
    else:
        assert not plugin.scaler
Example #21
        strategy=strategy,
        precision=precision,
    )

    model = AMPTestModel()
    trainer.fit(model)
    trainer.test(model)
    trainer.predict(model, DataLoader(RandomDataset(32, 64)))

    assert trainer.state.finished, f"Training failed with {trainer.state}"


@RunIf(min_cuda_gpus=2, min_torch="1.10")
@pytest.mark.parametrize("strategy", [None, "dp", "ddp_spawn"])
@pytest.mark.parametrize(
    "precision", [16, pytest.param("bf16", marks=RunIf(bf16_cuda=True))])
@pytest.mark.parametrize("devices", [1, 2])
def test_amp_gpus(tmpdir, strategy, precision, devices):
    """Make sure combinations of AMP and strategies work if supported."""
    tutils.reset_seed()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        accelerator="gpu",
        devices=devices,
        strategy=strategy,
        precision=precision,
    )

    model = AMPTestModel()
Example #22
    if precision == 32:
        yield
        return
    if accelerator == "gpu":
        with torch.cuda.amp.autocast():
            yield
    elif accelerator == "cpu":
        with torch.cpu.amp.autocast():
            yield
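
The fragment above is the body of an autocast helper whose decorator and signature were cut off. A hedged reconstruction of the pattern (the name and signature are assumptions):

from contextlib import contextmanager

import torch


@contextmanager
def _autocast_context(precision, accelerator):
    # hypothetical wrapper mirroring the truncated body above
    if precision == 32:
        yield
        return
    if accelerator == "gpu":
        with torch.cuda.amp.autocast():
            yield
    elif accelerator == "cpu":
        with torch.cpu.amp.autocast():
            yield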


@pytest.mark.parametrize(
    "precision, strategy, devices, accelerator",
    [
        pytest.param(32, None, 1, "cpu"),
        pytest.param(32, None, 1, "gpu", marks=RunIf(min_cuda_gpus=1)),
        pytest.param(16, None, 1, "gpu", marks=RunIf(min_cuda_gpus=1)),
        pytest.param("bf16",
                     None,
                     1,
                     "gpu",
                     marks=RunIf(
                         min_cuda_gpus=1, min_torch="1.10", bf16_cuda=True)),
        pytest.param(32, None, 1, "mps", marks=RunIf(mps=True)),
    ],
)
def test_boring_lite_model_single_device(precision, strategy, devices,
                                         accelerator, tmpdir):
    LightningLite.seed_everything(42)
    train_dataloader = DataLoader(RandomDataset(32, 8))
    model = BoringModel()
Example #23
from pytorch_lightning.utilities.seed import seed_everything
from tests_pytorch.helpers.datamodules import ClassifDataModule
from tests_pytorch.helpers.runif import RunIf
from tests_pytorch.strategies.test_dp import CustomClassificationModelDP

if _TORCH_GREATER_EQUAL_1_12:
    torch_test_assert_close = torch.testing.assert_close
else:
    torch_test_assert_close = torch.testing.assert_allclose


@pytest.mark.parametrize(
    "trainer_kwargs",
    (
        pytest.param(dict(accelerator="gpu", devices=1),
                     marks=RunIf(min_cuda_gpus=1)),
        pytest.param(dict(strategy="dp", accelerator="gpu", devices=2),
                     marks=RunIf(min_cuda_gpus=2)),
        pytest.param(dict(strategy="ddp_spawn", accelerator="gpu", devices=2),
                     marks=RunIf(min_cuda_gpus=2)),
        pytest.param(dict(accelerator="mps", devices=1),
                     marks=RunIf(mps=True)),
    ),
)
def test_evaluate(tmpdir, trainer_kwargs):
    tutils.set_random_main_port()
    seed_everything(1)
    dm = ClassifDataModule()
    model = CustomClassificationModelDP()
    trainer = Trainer(default_root_dir=tmpdir,
                      max_epochs=2,
Example #24
            self.num_train_batches += 1

    overridden_model = OverriddenModel()
    not_overridden_model = NotOverriddenModel()
    not_overridden_model.training_epoch_end = None

    trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, overfit_batches=2)

    trainer.fit(overridden_model)
    assert overridden_model.len_outputs == overridden_model.num_train_batches


@pytest.mark.parametrize(
    "accelerator,expected_device_str",
    [
        pytest.param("gpu", "cuda:0", marks=RunIf(min_cuda_gpus=1)),
        pytest.param("mps", "mps:0", marks=RunIf(mps=True)),
    ],
)
@mock.patch(
    "pytorch_lightning.strategies.Strategy.lightning_module",
    new_callable=PropertyMock,
)
def test_apply_batch_transfer_handler(model_getter_mock, accelerator,
                                      expected_device_str):
    expected_device = torch.device(expected_device_str)

    class CustomBatch:
        def __init__(self, data):
            self.samples = data[0]
            self.targets = data[1]
Example #25
    EmptyLite.seed_everything(3)

    lite = EmptyLite()
    lite_dataloader = lite.setup_dataloaders(DataLoader(Mock()))

    assert lite_dataloader.worker_init_fn.func is pl_worker_init_function
    assert os.environ == {"PL_GLOBAL_SEED": "3", "PL_SEED_WORKERS": "1"}
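
The os.environ assertion above reflects what seed_everything records. A small sketch using the plain seed utility (EmptyLite is a test-only LightningLite subclass that is not shown in this excerpt):

import os

from pytorch_lightning.utilities.seed import seed_everything

seed_everything(3, workers=True)
assert os.environ["PL_GLOBAL_SEED"] == "3"
# workers=True additionally flags that dataloader workers should be seeded
assert os.environ["PL_SEED_WORKERS"] == "1"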


@pytest.mark.parametrize(
    "strategy",
    [
        _StrategyType.DP,
        _StrategyType.DDP,
        _StrategyType.DDP_SPAWN,
        pytest.param(_StrategyType.DDP_FORK, marks=RunIf(skip_windows=True)),
        pytest.param(_StrategyType.DEEPSPEED, marks=RunIf(deepspeed=True)),
        pytest.param(_StrategyType.DDP_SHARDED, marks=RunIf(fairscale=True)),
        pytest.param(_StrategyType.DDP_SHARDED_SPAWN,
                     marks=RunIf(fairscale=True)),
    ],
)
def test_setup_dataloaders_replace_custom_sampler(strategy):
    """Test that asking to replace a custom sampler results in an error when a distributed sampler would be
    needed."""
    custom_sampler = Mock(spec=Sampler)
    dataloader = DataLoader(Mock(), sampler=custom_sampler)

    # explicitly asking to replace when a custom sampler is already configured raises an exception
    lite = EmptyLite(accelerator="cpu", strategy=strategy, devices=2)
    if lite._accelerator_connector.is_distributed: