def test_lr_scheduler_with_no_actual_scheduler_raises(tmpdir):
    """Test exception when lr_scheduler dict has no scheduler."""
    model = BoringModel()
    model.configure_optimizers = lambda: {"optimizer": optim.Adam(model.parameters()), "lr_scheduler": {}}
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(MisconfigurationException, match='The lr scheduler dict must have the key "scheduler"'):
        trainer.fit(model)
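# For contrast, a minimal sketch of a well-formed return value for `configure_optimizers`
# (illustrative helper, not part of the tests): the "lr_scheduler" dict must carry the
# "scheduler" key, optionally alongside keys such as "interval".
def _example_valid_lr_scheduler_config(model):
    optimizer = optim.Adam(model.parameters())
    return {
        "optimizer": optimizer,
        "lr_scheduler": {
            "scheduler": optim.lr_scheduler.StepLR(optimizer, step_size=1),
            "interval": "epoch",
        },
    }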
def test_invalid_setup_method():
    """Test error message when `setup` method of `LightningModule` or `LightningDataModule` is not defined
    correctly."""

    class CustomModel(BoringModel):
        def setup(self):
            pass

    class CustomDataModule(BoringDataModule):
        def setup(self):
            pass

    class CustomBoringCallback(Callback):
        def setup(self, pl_module, trainer):
            pass

    fit_kwargs = [
        {"model": CustomModel(), "datamodule": BoringDataModule()},
        {"model": BoringModel(), "datamodule": CustomDataModule()},
    ]

    for kwargs in fit_kwargs:
        trainer = Trainer(fast_dev_run=True)

        with pytest.raises(MisconfigurationException, match="does not have a `stage` argument"):
            trainer.fit(**kwargs)

    trainer = Trainer(fast_dev_run=True, callbacks=[CustomBoringCallback()])
    model = BoringModel()

    with pytest.raises(MisconfigurationException, match="does not have a `stage` argument"):
        trainer.fit(model)
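# A sketch of `setup` hooks that would pass the check above (illustrative classes): the
# module/datamodule hooks and the callback hook all need to accept a `stage` argument.
class _ValidSetupModel(BoringModel):
    def setup(self, stage=None):
        pass


class _ValidSetupCallback(Callback):
    def setup(self, trainer, pl_module, stage=None):
        pass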
def test_deepspeed_summary(tmpdir):
    """Test to ensure that the summary contains the correct values when stage 3 is enabled and that the trainer
    enables the `DeepSpeedSummary` when DeepSpeed is used."""

    model = BoringModel()
    total_parameters = sum(x.numel() for x in model.parameters())

    class TestCallback(Callback):
        def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
            model_summary = DeepSpeedSummary(pl_module, max_depth=1)
            assert model_summary.total_parameters == total_parameters
            assert model_summary.trainable_parameters == total_parameters

            # check the additional params per device
            summary_data = model_summary._get_summary_data()
            params_per_device = summary_data[-1][-1]
            assert int(params_per_device[0]) == (model_summary.total_parameters // 2)

    trainer = Trainer(
        strategy=DeepSpeedStrategy(stage=3),
        default_root_dir=tmpdir,
        accelerator="gpu",
        fast_dev_run=True,
        devices=2,
        precision=16,
        enable_model_summary=True,
        callbacks=[TestCallback()],
    )

    trainer.fit(model)
def test_none_optimizer(tmpdir):
    model = BoringModel()
    model.configure_optimizers = lambda: None
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)
    with pytest.warns(UserWarning, match="will run with no optimizer"):
        trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_unknown_configure_optimizers_raises(tmpdir):
    """Test exception with an unsupported configure_optimizers return."""
    model = BoringModel()
    model.configure_optimizers = lambda: 1
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(MisconfigurationException, match="Unknown configuration for model optimizers"):
        trainer.fit(model)
def test_onecyclelr_with_epoch_interval_warns():
    """Test warning when a OneCycleLR is used and interval is epoch."""
    model = BoringModel()
    optimizer = optim.Adam(model.parameters())
    lr_scheduler = {"scheduler": optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, total_steps=3)}
    with pytest.warns(RuntimeWarning, match="Are you sure you didn't mean 'interval': 'step'?"):
        _configure_schedulers_automatic_opt([lr_scheduler], None)
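# The configuration the warning points to is sketched below (illustrative helper):
# a `OneCycleLR` scheduler is normally stepped after every optimizer step.
def _example_onecycle_config(model):
    optimizer = optim.Adam(model.parameters())
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, total_steps=3)
    return {"optimizer": optimizer, "lr_scheduler": {"scheduler": scheduler, "interval": "step"}}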
def test_poptorch_models_at_different_stages(tmpdir):
    plugin = IPUStrategy()
    trainer = Trainer(default_root_dir=tmpdir, strategy=plugin, accelerator="ipu", devices=8)
    model = BoringModel()
    model.trainer = trainer
    plugin.model = model

    trainer.optimizers = model.configure_optimizers()[0]
    trainer.state.fn = TrainerFn.FITTING
    trainer.strategy.setup(trainer)
    assert list(trainer.strategy.poptorch_models) == [RunningStage.TRAINING, RunningStage.VALIDATING]

    for fn, stage in (
        (TrainerFn.VALIDATING, RunningStage.VALIDATING),
        (TrainerFn.TESTING, RunningStage.TESTING),
        (TrainerFn.PREDICTING, RunningStage.PREDICTING),
    ):
        trainer.state.fn = fn
        trainer.state.stage = stage
        trainer.strategy.setup(trainer)
        assert list(trainer.strategy.poptorch_models) == [stage]
def test_hpc_restore_attempt(tmpdir):
    """Test that restore() attempts to restore the hpc_ckpt with highest priority."""
    model = BoringModel()
    trainer = Trainer(default_root_dir=tmpdir, max_steps=1, enable_checkpointing=False, logger=False)
    trainer.fit(model)

    hpc_ckpt_path = tmpdir / "hpc_ckpt_3.ckpt"
    trainer.save_checkpoint(hpc_ckpt_path)
    assert os.listdir(tmpdir) == ["hpc_ckpt_3.ckpt"]

    # set weights to zero
    for param in model.parameters():
        torch.nn.init.constant_(param, 0)

    # case 1: restore hpc first, no explicit resume path provided
    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, enable_checkpointing=False, logger=False)
    trainer.fit(model)

    for param in model.parameters():
        assert param.abs().sum() > 0
        torch.nn.init.constant_(param, 0)

    # case 2: explicit resume path provided, restore hpc anyway
    trainer = Trainer(default_root_dir=tmpdir, max_steps=3)
    trainer.fit(model, ckpt_path="not existing")

    for param in model.parameters():
        assert param.abs().sum() > 0
def test_v1_8_0_lightning_module_use_amp():
    model = BoringModel()
    with pytest.deprecated_call(match="`LightningModule.use_amp` was deprecated in v1.6"):
        _ = model.use_amp
    with pytest.deprecated_call(match="`LightningModule.use_amp` was deprecated in v1.6"):
        model.use_amp = False
def test_property_current_epoch():
    """Test that the current_epoch in LightningModule is accessible via the Trainer."""
    model = BoringModel()
    assert model.current_epoch == 0

    trainer = Mock(current_epoch=123)
    model.trainer = trainer
    assert model.current_epoch == 123
def test_tensorboard_log_graph(tmpdir, example_input_array):
    """test that log graph works with both model.example_input_array and if array is passed externally."""
    model = BoringModel()
    if example_input_array is not None:
        model.example_input_array = None

    logger = TensorBoardLogger(tmpdir, log_graph=True)
    logger.log_graph(model, example_input_array)
def test_wrong_configure_optimizers(tmpdir):
    """Test that an error is thrown when no `configure_optimizers()` is defined."""
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)

    with pytest.raises(MisconfigurationException, match=r"No `configure_optimizers\(\)` method defined."):
        model = BoringModel()
        model.configure_optimizers = None
        trainer.fit(model)
def test_property_global_step():
    """Test that the global_step in LightningModule is accessible via the Trainer."""
    model = BoringModel()
    assert model.global_step == 0

    trainer = Mock(global_step=123)
    model.trainer = trainer
    assert model.global_step == 123
def test_property_local_rank():
    """Test that the local rank in LightningModule is accessible via the Trainer."""
    model = BoringModel()
    assert model.local_rank == 0

    trainer = Mock(local_rank=123)
    model.trainer = trainer
    assert model.local_rank == 123
def test_is_overridden():
    model = BoringModel()
    datamodule = BoringDataModule()

    # edge cases
    assert not is_overridden("whatever", None)
    with pytest.raises(ValueError, match="Expected a parent"):
        is_overridden("whatever", object())
    assert not is_overridden("whatever", model)
    assert not is_overridden("whatever", model, parent=LightningDataModule)

    class TestModel(BoringModel):
        def foo(self):
            pass

        def bar(self):
            return 1

    with pytest.raises(ValueError, match="The parent should define the method"):
        is_overridden("foo", TestModel())

    # normal usage
    assert is_overridden("training_step", model)
    assert is_overridden("train_dataloader", datamodule)

    class WrappedModel(TestModel):
        def __new__(cls, *args, **kwargs):
            obj = super().__new__(cls)
            obj.foo = cls.wrap(obj.foo)
            obj.bar = cls.wrap(obj.bar)
            return obj

        @staticmethod
        def wrap(fn):
            @wraps(fn)
            def wrapper():
                fn()

            return wrapper

        def bar(self):
            return 2

    # `functools.wraps()` support
    assert not is_overridden("foo", WrappedModel(), parent=TestModel)
    assert is_overridden("bar", WrappedModel(), parent=TestModel)

    # `Mock` support
    mock = Mock(spec=BoringModel, wraps=model)
    assert is_overridden("training_step", mock)
    mock = Mock(spec=BoringDataModule, wraps=datamodule)
    assert is_overridden("train_dataloader", mock)

    # `partial` support
    model.training_step = partial(model.training_step)
    assert is_overridden("training_step", model)
def test_property_loggers(tmpdir):
    """Test that loggers in LightningModule is accessible via the Trainer."""
    model = BoringModel()
    assert model.loggers == []

    logger = TensorBoardLogger(tmpdir)
    trainer = Trainer(logger=logger)
    model.trainer = trainer
    assert model.loggers == [logger]
def test_property_logger(tmpdir):
    """Test that the logger in LightningModule is accessible via the Trainer."""
    model = BoringModel()
    assert model.logger is None

    logger = TensorBoardLogger(tmpdir)
    trainer = Mock(loggers=[logger])
    model.trainer = trainer
    assert model.logger == logger
def test_dm_apply_batch_transfer_handler(get_module_mock, accelerator, device):
    expected_device = torch.device(device)

    class CustomBatch:
        def __init__(self, data):
            self.samples = data[0]
            self.targets = data[1]

    class CurrentTestDM(LightningDataModule):
        rank = 0
        transfer_batch_to_device_hook_rank = None
        on_before_batch_transfer_hook_rank = None
        on_after_batch_transfer_hook_rank = None

        def on_before_batch_transfer(self, batch, dataloader_idx):
            assert dataloader_idx == 0
            self.on_before_batch_transfer_hook_rank = self.rank
            self.rank += 1
            batch.samples += 1
            return batch

        def on_after_batch_transfer(self, batch, dataloader_idx):
            assert dataloader_idx == 0
            assert batch.samples.device == batch.targets.device == expected_device
            self.on_after_batch_transfer_hook_rank = self.rank
            self.rank += 1
            batch.targets *= 2
            return batch

        def transfer_batch_to_device(self, batch, device, dataloader_idx):
            assert dataloader_idx == 0
            self.transfer_batch_to_device_hook_rank = self.rank
            self.rank += 1
            batch.samples = batch.samples.to(device)
            batch.targets = batch.targets.to(device)
            return batch

    dm = CurrentTestDM()
    model = BoringModel()

    batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))

    trainer = Trainer(accelerator=accelerator, devices=1)
    model.trainer = trainer
    # running .fit() would require us to implement custom data loaders; we mock the model reference instead
    get_module_mock.return_value = model

    trainer._data_connector.attach_datamodule(model, datamodule=dm)
    batch_gpu = trainer.strategy.batch_to_device(batch, expected_device)

    assert dm.on_before_batch_transfer_hook_rank == 0
    assert dm.transfer_batch_to_device_hook_rank == 1
    assert dm.on_after_batch_transfer_hook_rank == 2
    assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device
    assert torch.allclose(batch_gpu.samples.cpu(), torch.ones(5, 32))
    assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)
def test_torchscript_device(device_str):
    """Test that scripted module is on the correct device."""
    device = torch.device(device_str)
    model = BoringModel().to(device)
    model.example_input_array = torch.randn(5, 32)

    script = model.to_torchscript()
    assert next(script.parameters()).device == device
    script_output = script(model.example_input_array.to(device))
    assert script_output.device == device
def test_reducelronplateau_with_no_monitor_raises(tmpdir):
    """Test exception when a ReduceLROnPlateau is used with no monitor."""
    model = BoringModel()
    optimizer = optim.Adam(model.parameters())
    model.configure_optimizers = lambda: ([optimizer], [optim.lr_scheduler.ReduceLROnPlateau(optimizer)])
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(
        MisconfigurationException, match="`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
    ):
        trainer.fit(model)
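# A sketch of the configuration the exception asks for (illustrative helper): when a
# `ReduceLROnPlateau` scheduler is returned, a monitored metric must be named alongside it.
def _example_plateau_config(model):
    optimizer = optim.Adam(model.parameters())
    return {
        "optimizer": optimizer,
        "lr_scheduler": {
            "scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer),
            "monitor": "val_loss",  # assumes a metric logged under this name
        },
    }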
def test_torchscript_with_no_input(tmpdir):
    """Test that an error is thrown when there is no input tensor."""
    model = BoringModel()
    model.example_input_array = None

    with pytest.raises(ValueError, match="requires either `example_inputs` or `model.example_input_array`"):
        model.to_torchscript(method="trace")
def test_model_saves_on_gpu(tmpdir, accelerator):
    """Test that model saves on gpu."""
    model = BoringModel()
    trainer = Trainer(accelerator=accelerator, devices=1, fast_dev_run=True)
    trainer.fit(model)

    file_path = os.path.join(tmpdir, "model.onnx")
    input_sample = torch.randn((1, 32))
    model.to_onnx(file_path, input_sample)
    assert os.path.isfile(file_path)
    assert os.path.getsize(file_path) > 4e2
def test_model_saves_with_input_sample(tmpdir):
    """Test that ONNX model saves with input sample and size is greater than 3 MB."""
    model = BoringModel()
    trainer = Trainer(fast_dev_run=True)
    trainer.fit(model)

    file_path = os.path.join(tmpdir, "model.onnx")
    input_sample = torch.randn((1, 32))
    model.to_onnx(file_path, input_sample)
    assert os.path.isfile(file_path)
    assert os.path.getsize(file_path) > 4e2
def test_error_if_no_input(tmpdir):
    """Test that an error is thrown when there is no input tensor."""
    model = BoringModel()
    model.example_input_array = None
    file_path = os.path.join(tmpdir, "model.onnx")
    with pytest.raises(
        ValueError,
        match=r"Could not export to ONNX since neither `input_sample` nor"
        r" `model.example_input_array` attribute is set.",
    ):
        model.to_onnx(file_path)
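# A sketch of the export path that does not raise (illustrative helper): either pass
# `input_sample` explicitly, as in the tests above, or set `model.example_input_array` first.
def _example_onnx_export(tmpdir):
    model = BoringModel()
    model.example_input_array = torch.randn((1, 32))
    model.to_onnx(os.path.join(tmpdir, "model.onnx"))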
def test_lr_scheduler_with_extra_keys_warns(tmpdir):
    """Test warning when lr_scheduler dict has extra keys."""
    model = BoringModel()
    optimizer = optim.Adam(model.parameters())
    model.configure_optimizers = lambda: {
        "optimizer": optimizer,
        "lr_scheduler": {"scheduler": optim.lr_scheduler.StepLR(optimizer, 1), "foo": 1, "bar": 2},
    }
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.warns(RuntimeWarning, match=r"Found unsupported keys in the lr scheduler dict: \{.+\}"):
        trainer.fit(model)
def test_v2_0_resume_from_checkpoint_trainer_constructor(tmpdir):
    # test resume_from_checkpoint still works until v2.0 deprecation
    model = BoringModel()
    callback = OldStatefulCallback(state=111)
    trainer = Trainer(default_root_dir=tmpdir, max_steps=1, callbacks=[callback])
    trainer.fit(model)
    ckpt_path = trainer.checkpoint_callback.best_model_path

    callback = OldStatefulCallback(state=222)
    with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
        trainer = Trainer(
            default_root_dir=tmpdir, max_steps=2, callbacks=[callback], resume_from_checkpoint=ckpt_path
        )
    with pytest.deprecated_call(match=r"trainer.resume_from_checkpoint` is deprecated in v1.5"):
        _ = trainer.resume_from_checkpoint
    assert trainer._checkpoint_connector.resume_checkpoint_path is None
    assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
    trainer.validate(model=model, ckpt_path=ckpt_path)
    assert callback.state == 222
    assert trainer._checkpoint_connector.resume_checkpoint_path is None
    assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
    with pytest.deprecated_call(match=r"trainer.resume_from_checkpoint` is deprecated in v1.5"):
        trainer.fit(model)
    ckpt_path = trainer.checkpoint_callback.best_model_path  # last `fit` replaced the `best_model_path`
    assert callback.state == 111
    assert trainer._checkpoint_connector.resume_checkpoint_path is None
    assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
    trainer.predict(model=model, ckpt_path=ckpt_path)
    assert trainer._checkpoint_connector.resume_checkpoint_path is None
    assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
    trainer.fit(model)
    assert trainer._checkpoint_connector.resume_checkpoint_path is None
    assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None

    # test fit(ckpt_path=) precedence over Trainer(resume_from_checkpoint=) path
    model = BoringModel()
    with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
        trainer = Trainer(resume_from_checkpoint="trainer_arg_path")
    with pytest.raises(FileNotFoundError, match="Checkpoint at fit_arg_ckpt_path not found. Aborting training."):
        trainer.fit(model, ckpt_path="fit_arg_ckpt_path")
def test_lr_scheduler_with_unknown_interval_raises(tmpdir):
    """Test exception when lr_scheduler dict has unknown interval param value."""
    model = BoringModel()
    optimizer = optim.Adam(model.parameters())
    model.configure_optimizers = lambda: {
        "optimizer": optimizer,
        "lr_scheduler": {"scheduler": optim.lr_scheduler.StepLR(optimizer, 1), "interval": "incorrect_unknown_value"},
    }
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(MisconfigurationException, match=r'The "interval" key in lr scheduler dict must be'):
        trainer.fit(model)
def test_dataloader_source_request_from_module():
    """Test requesting a dataloader from a module works."""
    module = BoringModel()
    module.trainer = Trainer()
    module.foo = Mock(return_value=module.train_dataloader())

    source = _DataLoaderSource(module, "foo")
    assert source.is_module()
    module.foo.assert_not_called()
    assert isinstance(source.dataloader(), DataLoader)
    module.foo.assert_called_once()
def test_reducelronplateau_with_no_monitor_in_lr_scheduler_dict_raises(tmpdir):
    """Test exception when lr_scheduler dict has a ReduceLROnPlateau with no monitor."""
    model = BoringModel()
    optimizer = optim.Adam(model.parameters())
    model.configure_optimizers = lambda: {
        "optimizer": optimizer,
        "lr_scheduler": {"scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer)},
    }
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(MisconfigurationException, match="must include a monitor when a `ReduceLROnPlateau`"):
        trainer.fit(model)
def test_trainer_manual_optimization_config(tmpdir):
    """Test error message when requesting Trainer features unsupported with manual optimization."""
    model = BoringModel()
    model.automatic_optimization = False

    trainer = Trainer(gradient_clip_val=1.0)
    with pytest.raises(MisconfigurationException, match="Automatic gradient clipping is not supported"):
        trainer.fit(model)

    trainer = Trainer(accumulate_grad_batches=2)
    with pytest.raises(MisconfigurationException, match="Automatic gradient accumulation is not supported"):
        trainer.fit(model)
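# With manual optimization, gradient clipping and accumulation become the user's
# responsibility inside `training_step`. A minimal sketch (illustrative class, loss
# computation simplified):
class _ManualOptimizationModel(BoringModel):
    def __init__(self):
        super().__init__()
        self.automatic_optimization = False

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        loss = self(batch).sum()
        self.manual_backward(loss)
        opt.step()
        opt.zero_grad()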