def test_multiple_test_dataloader(tmpdir):
    """Verify multiple test_dataloader."""
    tutils.reset_seed()

    class CurrentTestModel(LightningTestMultipleDataloadersMixin,
                           LightningTestModelBase):
        pass

    hparams = tutils.get_hparams()
    model = CurrentTestModel(hparams)

    # logger file to get meta
    trainer_options = dict(
        default_save_path=tmpdir,
        max_epochs=1,
        val_percent_check=0.1,
        train_percent_check=0.2,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # verify there are 2 test loaders
    assert len(trainer.get_test_dataloaders()) == 2, \
        'Multiple test_dataloaders not initiated properly'

    # make sure predictions are good for each test set
    for dataloader in trainer.get_test_dataloaders():
        tutils.run_prediction(dataloader, trainer.model)

    # run the test method
    trainer.test()

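# Background for the assertion above: Lightning interprets a list returned
# from test_dataloader() as multiple test loaders. The sketch below shows the
# pattern the mixin exercises; it is an illustrative stand-in, not the actual
# LightningTestMultipleDataloadersMixin implementation.
import torch
from torch.utils.data import DataLoader, TensorDataset


class _ExampleMultipleTestLoadersMixin:
    """Hypothetical mixin: expose two test loaders instead of one."""

    def test_dataloader(self):
        # two small random datasets stand in for two real test splits
        ds_a = TensorDataset(torch.randn(64, 28 * 28), torch.randint(0, 10, (64,)))
        ds_b = TensorDataset(torch.randn(64, 28 * 28), torch.randint(0, 10, (64,)))
        return [DataLoader(ds_a, batch_size=32),
                DataLoader(ds_b, batch_size=32)]
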
def test_multiple_val_dataloader():
    """Verify multiple val_dataloader."""
    tutils.reset_seed()

    class CurrentTestModel(LightningValidationMultipleDataloadersMixin,
                           LightningTestModelBase):
        pass

    hparams = tutils.get_hparams()
    model = CurrentTestModel(hparams)

    # logger file to get meta
    trainer_options = dict(
        max_nb_epochs=1,
        val_percent_check=0.1,
        train_percent_check=1.0,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # verify training completed
    assert result == 1

    # verify there are 2 val loaders
    assert len(trainer.get_val_dataloaders()) == 2, \
        'Multiple val_dataloaders not initiated properly'

    # make sure predictions are good for each val set
    for dataloader in trainer.get_val_dataloaders():
        tutils.run_prediction(dataloader, trainer.model)

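# With multiple val loaders, Lightning passes an extra loader index into the
# validation step so the model can tell the datasets apart. A hedged sketch of
# per-loader logic (current Lightning calls the argument dataloader_idx; very
# old releases spelled it differently):
import torch.nn.functional as F


class _ExampleMultipleValLoadersMixin:
    """Hypothetical mixin: keep metrics from the two val sets separate."""

    def validation_step(self, batch, batch_idx, dataloader_idx):
        x, y = batch
        y_hat = self.forward(x)
        # key the loss by loader index so the two val sets stay distinguishable
        return {f'val_loss_{dataloader_idx}': F.cross_entropy(y_hat, y)}
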
def assert_good_acc():
    assert trainer.current_epoch == real_global_epoch
    assert trainer.current_epoch >= 0

    # if model and state loaded correctly, predictions will be good even though we
    # haven't trained with the new loaded model
    trainer.model.eval()
    for dataloader in trainer.get_val_dataloaders():
        tutils.run_prediction(dataloader, trainer.model)

def assert_good_acc():
    assert new_trainer.current_epoch == real_global_epoch and new_trainer.current_epoch > 0

    # if model and state loaded correctly, predictions will be good even though we
    # haven't trained with the new loaded model
    dp_model = new_trainer.model
    dp_model.eval()

    dataloader = trainer.get_train_dataloader()
    tutils.run_prediction(dataloader, dp_model, dp=True)

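# Both assert_good_acc helpers above are closures defined inside larger
# checkpoint tests: train, save, build a fresh Trainer, restore, then run the
# closure to confirm epoch counters and weights survived the round trip. A
# hedged sketch of that surrounding pattern (the auto-restore behavior and the
# hook used to trigger the check are assumptions; the real tests may wire it
# differently):
def _example_restore_pattern(model, trainer_options):
    # first run: train briefly and checkpoint via the configured callbacks
    trainer = Trainer(**trainer_options)
    trainer.fit(model)
    real_global_epoch = trainer.current_epoch

    # second run: a fresh trainer pointed at the same experiment, assuming
    # Lightning restores the last checkpoint it finds there
    new_trainer = Trainer(**trainer_options)

    def assert_good_acc():
        assert new_trainer.current_epoch == real_global_epoch

    # monkey-patch a model hook so the check fires once state is restored
    model.on_train_start = assert_good_acc
    new_trainer.fit(model)
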
def test_running_test_pretrained_model_ddp():
    """Verify test() on pretrained model."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tutils.init_save_dir()

    # exp file to get meta
    logger = tutils.get_test_tube_logger(False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=False,
        max_nb_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend='ddp',
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    exp = logger.experiment
    logging.info(os.listdir(exp.get_data_path(exp.name, exp.version)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger.experiment,
                                         trainer.checkpoint_callback.filepath,
                                         module_class=LightningTestModel)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    for dataloader in model.test_dataloader():
        tutils.run_prediction(dataloader, pretrained_model)

    tutils.clear_save_dir()

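# tutils.set_random_master_port (used above) exists so concurrent DDP test
# runs don't collide on one rendezvous port. A minimal sketch of such a helper
# (the actual tutils implementation may differ):
import os
import random


def _example_set_random_master_port():
    # torch.distributed reads MASTER_PORT when initializing the process group
    os.environ['MASTER_PORT'] = str(random.randint(10000, 19000))
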
def test_amp_gpu_ddp_slurm_managed():
    """Make sure DDP + AMP work."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()

    # simulate setting slurm flags
    tutils.set_random_master_port()
    os.environ['SLURM_LOCALID'] = str(0)

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        gpus=[0],
        distributed_backend='ddp',
        use_amp=True,
    )

    save_dir = tutils.init_save_dir()

    # exp file to get meta
    logger = tutils.get_test_tube_logger(False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['checkpoint_callback'] = checkpoint
    trainer_options['logger'] = logger

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test root model address
    assert trainer.resolve_root_node_address('abc') == 'abc'
    assert trainer.resolve_root_node_address('abc[23]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'

    # test model loading with a map_location
    pretrained_model = tutils.load_model(logger.experiment,
                                         trainer.checkpoint_callback.filepath)

    # test model preds
    for dataloader in trainer.get_test_dataloaders():
        tutils.run_prediction(dataloader, pretrained_model)

    if trainer.use_ddp:
        # on hpc this would work fine... but need to hack it for the purpose of the test
        trainer.model = pretrained_model
        trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()

    # test HPC loading / saving
    trainer.hpc_save(save_dir, logger)
    trainer.hpc_load(save_dir, on_gpu=True)

    # test freeze on gpu
    model.freeze()
    model.unfreeze()

    tutils.clear_save_dir()

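# The resolve_root_node_address asserts above pin down how a SLURM node list
# collapses to a single root address: keep the host prefix and the first
# number of the first range. A hedged reimplementation that satisfies those
# four asserts (the Trainer's real method may cover more cases):
def _example_resolve_root_node_address(root_node):
    if '[' not in root_node:
        return root_node                      # plain hostname, e.g. 'abc'
    name, ranges = root_node.split('[', 1)
    ranges = ranges.rstrip(']')               # '23-24, 45-40, 40'
    first = ranges.split(',')[0]              # '23-24'
    number = first.split('-')[0].strip()      # '23'
    return name + number                      # 'abc23'


assert _example_resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'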