# Imports assumed from the surrounding PyTorch Lightning test-suite context;
# the exact module paths of the shared test helpers are assumptions.
import glob
import logging
import os

import pytest
import torch

import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from tests.base import EvalModelTemplate, LightningTestModel

log = logging.getLogger(__name__)


def test_running_test_pretrained_model_cpu(tmpdir):
    """Verify test() on pretrained model."""
    model = EvalModelTemplate()

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        progress_bar_refresh_rate=0,
        max_epochs=3,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    pretrained_model = tutils.load_model(
        logger, trainer.checkpoint_callback.dirpath, module_class=EvalModelTemplate
    )

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)


def test_running_test_after_fitting(tmpdir):
    """Verify test() on fitted model."""
    model = EvalModelTemplate()

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # fit model
    trainer = Trainer(
        default_root_dir=tmpdir,
        progress_bar_refresh_rate=0,
        max_epochs=8,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        limit_test_batches=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
    )
    result = trainer.fit(model)

    assert result == 1, 'training failed to complete'

    trainer.test()

    # test we have good test accuracy
    tutils.assert_ok_model_acc(trainer, thr=0.5)


def test_running_test_no_val(tmpdir):
    """Verify `test()` works on a model with no `val_loader`."""
    model = EvalModelTemplate()
    # EvalModelTemplate ships a validation loop by default; drop the hooks so
    # the model genuinely has no val loader, as the docstring claims
    # (assumption: clearing the overrides is enough for the Trainer to skip validation)
    model.validation_step = None
    model.validation_epoch_end = None
    model.val_dataloader = None

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # fit model
    trainer = Trainer(
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        limit_test_batches=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        early_stop_callback=False,
    )
    result = trainer.fit(model)

    assert result == 1, 'training failed to complete'

    trainer.test()

    # test we have good test accuracy
    tutils.assert_ok_model_acc(trainer)


# `backend` is supplied via parametrization; the decorators below are
# reconstructed (backend values and GPU guard are assumptions) so the test is
# collectable and runnable under pytest.
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='test requires multi-GPU machine')
@pytest.mark.parametrize('backend', ['dp', 'ddp_spawn'])
def test_running_test_pretrained_model_distrib(tmpdir, backend):
    """Verify `test()` on pretrained model."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_default_hparams()
    model = LightningTestModel(hparams)

    # exp file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        progress_bar_refresh_rate=0,
        max_epochs=2,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend=backend,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    pretrained_model = tutils.load_model(
        logger, trainer.checkpoint_callback.dirpath, module_class=LightningTestModel
    )

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)

    dataloaders = model.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    for dataloader in dataloaders:
        tutils.run_prediction(dataloader, pretrained_model)
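
# (Note on the test above: `tutils.run_prediction` re-runs each test dataloader
# through the restored model as a sanity check that the reloaded weights still
# predict sensibly once distributed training has collapsed back to one process.)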


def test_load_model_from_checkpoint(tmpdir):
    """Verify test() on pretrained model."""
    hparams = EvalModelTemplate.get_default_hparams()
    model = EvalModelTemplate(**hparams)

    trainer_options = dict(
        progress_bar_refresh_rate=0,
        max_epochs=2,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        checkpoint_callback=ModelCheckpoint(tmpdir, save_top_k=-1),
        default_root_dir=tmpdir,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    trainer.test(ckpt_path=None)  # ckpt_path=None tests the in-memory weights rather than a checkpoint

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    # load last checkpoint
    last_checkpoint = sorted(glob.glob(os.path.join(trainer.checkpoint_callback.dirpath, "*.ckpt")))[-1]
    pretrained_model = EvalModelTemplate.load_from_checkpoint(last_checkpoint)

    # test that hparams loaded correctly
    for k, v in hparams.items():
        assert getattr(pretrained_model, k) == v

    # assert weights are the same
    for (old_name, old_p), (new_name, new_p) in zip(model.named_parameters(),
                                                    pretrained_model.named_parameters()):
        assert torch.all(torch.eq(old_p, new_p)), 'loaded weights are not the same as the saved weights'

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)
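
# A complementary restore path, sketched as an illustration and not part of the
# original suite: `Trainer.test` can also be pointed back at a saved checkpoint
# instead of the in-memory weights, e.g.
#
#     trainer.test(ckpt_path='best')           # reload the best tracked checkpoint
#     trainer.test(ckpt_path=last_checkpoint)  # or an explicit .ckpt path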


def test_running_test_pretrained_model_dp(tmpdir):
    """Verify test() on pretrained model."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    hparams = tutils.get_default_hparams()
    model = LightningTestModel(hparams)

    # logger file to get meta
    logger = tutils.get_default_testtube_logger(tmpdir, False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=True,
        max_epochs=4,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend='dp',
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    pretrained_model = tutils.load_model(
        logger, trainer.checkpoint_callback.dirpath, module_class=LightningTestModel
    )

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)