def test_unknown_configure_optimizers_raises(tmpdir):
    """An unsupported ``configure_optimizers`` return value must raise a ``MisconfigurationException``."""
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    boring = BoringModel()
    # An int is not a valid optimizer configuration.
    boring.configure_optimizers = lambda: 1
    with pytest.raises(MisconfigurationException, match="Unknown configuration for model optimizers"):
        trainer.fit(boring)
def test_lr_scheduler_with_no_actual_scheduler_raises(tmpdir):
    """An ``lr_scheduler`` dict that lacks the mandatory ``"scheduler"`` entry must raise."""
    boring = BoringModel()
    # Empty scheduler dict: the "scheduler" key is deliberately missing.
    boring.configure_optimizers = lambda: {
        "optimizer": optim.Adam(boring.parameters()),
        "lr_scheduler": {},
    }
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(MisconfigurationException, match='The lr scheduler dict must have the key "scheduler"'):
        trainer.fit(boring)
def test_none_optimizer(tmpdir):
    """Returning ``None`` from ``configure_optimizers`` warns but training still completes."""
    boring = BoringModel()
    boring.configure_optimizers = lambda: None
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)
    with pytest.warns(UserWarning, match="will run with no optimizer"):
        trainer.fit(boring)
    # The run must finish successfully even without an optimizer.
    assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_poptorch_models_at_different_stages(tmpdir):
    """Verify the IPU strategy compiles one poptorch model per running stage.

    Fitting should register models for both TRAINING and VALIDATING, while each of
    validate/test/predict should register only the model for its own stage.
    """
    plugin = IPUStrategy()
    trainer = Trainer(default_root_dir=tmpdir, strategy=plugin, accelerator="ipu", devices=8)
    model = BoringModel()
    # Wire model, strategy and trainer together manually since trainer.fit() is never called here.
    model.trainer = trainer
    plugin.model = model
    trainer.optimizers = model.configure_optimizers()[0]
    trainer.state.fn = TrainerFn.FITTING
    trainer.strategy.setup(trainer)
    # Fitting prepares both the training and the validation model.
    assert list(trainer.strategy.poptorch_models) == [RunningStage.TRAINING, RunningStage.VALIDATING]

    for fn, stage in (
        (TrainerFn.VALIDATING, RunningStage.VALIDATING),
        (TrainerFn.TESTING, RunningStage.TESTING),
        (TrainerFn.PREDICTING, RunningStage.PREDICTING),
    ):
        trainer.state.fn = fn
        trainer.state.stage = stage
        trainer.strategy.setup(trainer)
        # Non-fitting entry points prepare exactly one model, keyed by the stage.
        assert list(trainer.strategy.poptorch_models) == [stage]
def test_wrong_configure_optimizers(tmpdir):
    """Test that an error is thrown when no `configure_optimizers()` is defined."""
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
    boring = BoringModel()
    # Remove the hook entirely so the Trainer sees it as undefined.
    boring.configure_optimizers = None
    with pytest.raises(MisconfigurationException, match=r"No `configure_optimizers\(\)` method defined."):
        trainer.fit(boring)
def test_reducelronplateau_with_no_monitor_raises(tmpdir):
    """A ``ReduceLROnPlateau`` scheduler configured without a monitor must raise."""
    boring = BoringModel()
    adam = optim.Adam(boring.parameters())
    boring.configure_optimizers = lambda: ([adam], [optim.lr_scheduler.ReduceLROnPlateau(adam)])
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    expected = "`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
    with pytest.raises(MisconfigurationException, match=expected):
        trainer.fit(boring)
def test_reducelronplateau_with_no_monitor_in_lr_scheduler_dict_raises(tmpdir):
    """A ``ReduceLROnPlateau`` inside an lr_scheduler dict also requires a monitor."""
    boring = BoringModel()
    adam = optim.Adam(boring.parameters())
    plateau = optim.lr_scheduler.ReduceLROnPlateau(adam)
    # Dict form without a "monitor" entry.
    boring.configure_optimizers = lambda: {"optimizer": adam, "lr_scheduler": {"scheduler": plateau}}
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(MisconfigurationException, match="must include a monitor when a `ReduceLROnPlateau`"):
        trainer.fit(boring)
def test_lr_scheduler_with_extra_keys_warns(tmpdir):
    """Unsupported keys in the lr_scheduler dict must trigger a ``RuntimeWarning``."""
    boring = BoringModel()
    adam = optim.Adam(boring.parameters())
    # "foo" and "bar" are not recognized lr_scheduler options.
    sched_cfg = {"scheduler": optim.lr_scheduler.StepLR(adam, 1), "foo": 1, "bar": 2}
    boring.configure_optimizers = lambda: {"optimizer": adam, "lr_scheduler": sched_cfg}
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.warns(RuntimeWarning, match=r"Found unsupported keys in the lr scheduler dict: \{.+\}"):
        trainer.fit(boring)
def test_lr_scheduler_with_unknown_interval_raises(tmpdir):
    """An invalid ``interval`` value in the lr_scheduler dict must raise."""
    boring = BoringModel()
    adam = optim.Adam(boring.parameters())
    boring.configure_optimizers = lambda: {
        "optimizer": adam,
        "lr_scheduler": {"scheduler": optim.lr_scheduler.StepLR(adam, 1), "interval": "incorrect_unknown_value"},
    }
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    with pytest.raises(MisconfigurationException, match=r'The "interval" key in lr scheduler dict must be'):
        trainer.fit(boring)
def test_lr_scheduler_strict(step_mock, tmpdir, complete_epoch):
    """Test "strict" support in lr_scheduler dict.

    With ``strict=True`` a missing monitored metric is an error; with ``strict=False``
    it only warns. In both cases the scheduler's ``step`` (mocked via ``step_mock``,
    presumably patched by a decorator outside this view — TODO confirm) must never run.
    """
    model = BoringModel()
    optimizer = optim.Adam(model.parameters())
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    # complete_epoch=True runs one full epoch; otherwise stop after a single step.
    max_epochs = 1 if complete_epoch else None
    max_steps = -1 if complete_epoch else 1
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=max_epochs, max_steps=max_steps)
    # "giraffe" is never logged, so the monitor is guaranteed to be unavailable.
    model.configure_optimizers = lambda: {
        "optimizer": optimizer,
        "lr_scheduler": {"scheduler": scheduler, "monitor": "giraffe", "strict": True},
    }

    if complete_epoch:
        # strict=True: the missing metric is only checked at epoch end, and it raises.
        with pytest.raises(
            MisconfigurationException,
            match=r"ReduceLROnPlateau conditioned on metric .* which is not available\. Available metrics are:",
        ):
            trainer.fit(model)
    else:
        trainer.fit(model)

    step_mock.assert_not_called()

    model.configure_optimizers = lambda: {
        "optimizer": optimizer,
        "lr_scheduler": {"scheduler": scheduler, "monitor": "giraffe", "strict": False},
    }

    if complete_epoch:
        # strict=False: the same condition only emits a warning.
        trainer = Trainer(default_root_dir=tmpdir, max_epochs=max_epochs, max_steps=max_steps)
        with pytest.warns(
            RuntimeWarning, match=r"ReduceLROnPlateau conditioned on metric .* which is not available but strict"
        ):
            trainer.fit(model)

    step_mock.assert_not_called()
def test_replication_factor(tmpdir):
    """Ensure if the user passes manual poptorch Options with custom parameters set, we set them correctly in the
    dataloaders."""
    # Default case: the replication factor follows the requested device count.
    plugin = IPUStrategy()
    trainer = Trainer(accelerator="ipu", devices=2, default_root_dir=tmpdir, fast_dev_run=True, strategy=plugin)
    assert isinstance(trainer.accelerator, IPUAccelerator)
    assert trainer.num_devices == 2
    assert trainer.strategy.replication_factor == 2

    # Custom case: user-supplied poptorch Options override the device count.
    model = BoringModel()
    training_opts = poptorch.Options()
    inference_opts = poptorch.Options()
    training_opts.replicationFactor(8)
    inference_opts.replicationFactor(7)
    plugin = IPUStrategy(inference_opts=inference_opts, training_opts=training_opts)
    trainer = Trainer(default_root_dir=tmpdir, accelerator="ipu", devices=1, strategy=plugin)
    # Wire everything together manually since trainer.fit() is never called.
    trainer.optimizers = model.configure_optimizers()[0]
    plugin.model = model
    model.trainer = trainer
    trainer.state.fn = TrainerFn.FITTING
    trainer.strategy.setup(trainer)

    # The active stage selects between training (8) and inference (7) options.
    trainer.state.stage = RunningStage.TRAINING
    assert trainer.strategy.replication_factor == 8
    trainer.state.stage = RunningStage.VALIDATING
    assert trainer.strategy.replication_factor == 7

    for fn, stage in (
        (TrainerFn.VALIDATING, RunningStage.VALIDATING),
        (TrainerFn.TESTING, RunningStage.TESTING),
        (TrainerFn.PREDICTING, RunningStage.PREDICTING),
    ):
        trainer.state.fn = fn
        trainer.state.stage = stage
        trainer.strategy.setup(trainer)
        # All non-fitting entry points use the inference options.
        assert trainer.strategy.replication_factor == 7
def test_optimizer_return_options(tmpdir):
    """Exercise every supported return format of ``configure_optimizers``.

    Each case swaps in a new ``configure_optimizers`` lambda and checks the
    optimizers, scheduler configs and frequencies produced by
    ``_init_optimizers_and_lr_schedulers``.
    """
    trainer = Trainer(default_root_dir=tmpdir)
    model = BoringModel()
    trainer.strategy.connect(model)
    trainer.lightning_module.trainer = trainer

    # Optimizers and schedulers shared by all the cases below.
    opt_a = optim.Adam(model.parameters(), lr=0.002)
    opt_b = optim.SGD(model.parameters(), lr=0.002)
    scheduler_a = optim.lr_scheduler.StepLR(opt_a, 10)
    scheduler_b = optim.lr_scheduler.StepLR(opt_b, 10)

    # single optimizer
    model.configure_optimizers = lambda: opt_a
    opt, lr_sched, freq = _init_optimizers_and_lr_schedulers(model)
    assert len(opt) == 1 and len(lr_sched) == len(freq) == 0

    # opt tuple
    model.configure_optimizers = lambda: (opt_a, opt_b)
    opt, lr_sched, freq = _init_optimizers_and_lr_schedulers(model)
    assert opt == [opt_a, opt_b]
    assert len(lr_sched) == len(freq) == 0

    # opt list
    model.configure_optimizers = lambda: [opt_a, opt_b]
    opt, lr_sched, freq = _init_optimizers_and_lr_schedulers(model)
    assert opt == [opt_a, opt_b]
    assert len(lr_sched) == len(freq) == 0

    # Expected scheduler config for the scheduler_a cases below.
    ref_lr_sched = LRSchedulerConfig(
        scheduler=scheduler_a,
        interval="epoch",
        frequency=1,
        reduce_on_plateau=False,
        monitor=None,
        strict=True,
        name=None,
        opt_idx=0,
    )

    # opt tuple of 2 lists
    model.configure_optimizers = lambda: ([opt_a], [scheduler_a])
    opt, lr_sched, freq = _init_optimizers_and_lr_schedulers(model)
    assert len(opt) == len(lr_sched) == 1
    assert len(freq) == 0
    assert opt[0] == opt_a
    assert lr_sched[0] == ref_lr_sched

    # opt tuple of 1 list
    model.configure_optimizers = lambda: ([opt_a], scheduler_a)
    opt, lr_sched, freq = _init_optimizers_and_lr_schedulers(model)
    assert len(opt) == len(lr_sched) == 1
    assert len(freq) == 0
    assert opt[0] == opt_a
    assert lr_sched[0] == ref_lr_sched

    # opt single dictionary
    model.configure_optimizers = lambda: {"optimizer": opt_a, "lr_scheduler": scheduler_a}
    opt, lr_sched, freq = _init_optimizers_and_lr_schedulers(model)
    assert len(opt) == len(lr_sched) == 1
    assert len(freq) == 0
    assert opt[0] == opt_a
    assert lr_sched[0] == ref_lr_sched

    # opt multiple dictionaries with frequencies
    model.configure_optimizers = lambda: (
        {"optimizer": opt_a, "lr_scheduler": scheduler_a, "frequency": 1},
        {"optimizer": opt_b, "lr_scheduler": scheduler_b, "frequency": 5},
    )
    opt, lr_sched, freq = _init_optimizers_and_lr_schedulers(model)
    assert len(opt) == len(lr_sched) == len(freq) == 2
    assert opt[0] == opt_a
    # ref_lr_sched is mutated in place to match each entry before comparing.
    ref_lr_sched.opt_idx = 0
    assert lr_sched[0] == ref_lr_sched
    ref_lr_sched.scheduler = scheduler_b
    ref_lr_sched.opt_idx = 1
    assert lr_sched[1] == ref_lr_sched
    assert freq == [1, 5]