def test_running_test_pretrained_model():
    """Verify test() on pretrained model"""
    testing_utils.reset_seed()

    hparams = testing_utils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = testing_utils.init_save_dir()

    # logger file to get meta
    logger = testing_utils.get_test_tube_logger(False)

    # logger file to get weights
    checkpoint = testing_utils.init_checkpoint_callback(logger)

    trainer_options = dict(show_progress_bar=False,
                           max_nb_epochs=1,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           logger=logger)

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    pretrained_model = testing_utils.load_model(
        logger.experiment,
        trainer.checkpoint_callback.filepath,
        module_class=LightningTestModel)

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    testing_utils.assert_ok_test_acc(new_trainer)

    testing_utils.clear_save_dir()

def test_custom_logger():

    class CustomLogger(LightningLoggerBase):
        def __init__(self):
            super().__init__()
            self.hparams_logged = None
            self.metrics_logged = None
            self.finalized = False

        @rank_zero_only
        def log_hyperparams(self, params):
            self.hparams_logged = params

        @rank_zero_only
        def log_metrics(self, metrics, step_num):
            self.metrics_logged = metrics

        @rank_zero_only
        def finalize(self, status):
            self.finalized_status = status

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    logger = CustomLogger()

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, "Training failed"
    assert logger.hparams_logged == hparams
    assert logger.metrics_logged != {}
    assert logger.finalized_status == "success"

def test_testtube_pickle(tmpdir):
    """Verify that pickling a trainer containing a test tube logger works."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tmpdir

    logger = tutils.get_test_tube_logger(tmpdir, False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(max_nb_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})

def test_testtube_logger():
    """Verify that basic functionality of test tube logger works."""
    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    logger = get_test_tube_logger(False)

    trainer_options = dict(max_nb_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, "Training failed"

    clear_save_dir()

def test_amp_gpu_ddp():
    """Make sure DDP + AMP work."""
    if not testing_utils.can_run_gpu_test():
        return

    testing_utils.reset_seed()
    testing_utils.set_random_master_port()

    hparams = testing_utils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        gpus=2,
        distributed_backend='ddp',
        use_amp=True
    )

    testing_utils.run_gpu_model_test(trainer_options, model, hparams)

def test_no_amp_single_gpu():
    """Make sure an error is raised when AMP is requested on a single GPU with dp."""
    testing_utils.reset_seed()

    if not testing_utils.can_run_gpu_test():
        return

    hparams = testing_utils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        gpus=1,
        distributed_backend='dp',
        use_amp=True
    )

    with pytest.raises((MisconfigurationException, ModuleNotFoundError)):
        testing_utils.run_gpu_model_test(trainer_options, model, hparams)

def test_mlflow_pickle():
    """Verify that pickling trainer with mlflow logger works."""
    try:
        from pytorch_lightning.logging import MLFlowLogger
    except ModuleNotFoundError:
        return

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    mlflow_dir = os.path.join(root_dir, "mlruns")

    logger = MLFlowLogger("test", f"file://{mlflow_dir}")
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(max_nb_epochs=1, logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})

def test_comet_logger():
    """Verify that basic functionality of Comet.ml logger works."""
    reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    hparams = testing_utils.get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    comet_dir = os.path.join(root_dir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"

    testing_utils.clear_save_dir()

def test_comet_logger(tmpdir, monkeypatch):
    """Verify that basic functionality of Comet.ml logger works."""
    # prevent comet logger from trying to print at exit, since
    # pytest's stdout/stderr redirection breaks it
    import atexit
    monkeypatch.setattr(atexit, "register", lambda _: None)

    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"

def test_no_val_module(tmpdir):
    """Tests use case where trainer saves the model, and user loads it from tags independently."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()

    class CurrentTestModel(LightningTestModelBase):
        pass

    model = CurrentTestModel(hparams)

    # logger file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    trainer_options = dict(
        max_epochs=1,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(tmpdir)
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'training failed to complete'

    # save model
    new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')
    trainer.save_checkpoint(new_weights_path)

    # load new model
    tags_path = logger.experiment.get_data_path(logger.experiment.name, logger.experiment.version)
    tags_path = os.path.join(tags_path, 'meta_tags.csv')
    model_2 = LightningTestModel.load_from_metrics(weights_path=new_weights_path,
                                                   tags_csv=tags_path)
    model_2.eval()

def test_running_test_after_fitting():
    """Verify test() on fitted model"""
    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    # logger file to get weights
    checkpoint = ModelCheckpoint(save_dir)

    trainer_options = dict(show_progress_bar=False,
                           max_nb_epochs=1,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           test_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           logger=logger)

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, 'training failed to complete'

    trainer.test()

    # test we have good test accuracy
    assert_ok_test_acc(trainer)

    clear_save_dir()

def test_running_test_after_fitting():
    """Verify test() on fitted model"""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # exp file to get meta
    exp = get_exp(False)
    exp.argparse(hparams)
    exp.save()

    # exp file to get weights
    checkpoint = ModelCheckpoint(save_dir)

    trainer_options = dict(
        show_progress_bar=False,
        max_nb_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        test_percent_check=0.2,
        checkpoint_callback=checkpoint,
        experiment=exp
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, 'training failed to complete'

    trainer.test()

    # test we have good test accuracy
    assert_ok_test_acc(trainer)

    clear_save_dir()

def test_ddp_sampler_error(tmpdir):
    """Make sure a warning is raised when the DistributedSampler is force-removed with DDP."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams, force_remove_distributed_sampler=True)

    logger = tutils.get_test_tube_logger(tmpdir, True)

    trainer = Trainer(
        logger=logger,
        show_progress_bar=False,
        max_epochs=1,
        gpus=[0, 1],
        distributed_backend='ddp',
        use_amp=True
    )

    with pytest.warns(UserWarning):
        trainer.get_dataloaders(model)

def test_simple_cpu(tmpdir):
    """Verify a simple CPU training run completes."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        default_save_path=tmpdir,
        max_nb_epochs=1,
        val_percent_check=0.1,
        train_percent_check=0.1,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'cpu model failed to complete'

def test_amp_single_gpu():
    """Make sure AMP works on a single GPU with dp."""
    if not torch.cuda.is_available():
        warnings.warn('test_amp_single_gpu cannot run.'
                      ' Rerun on a GPU node to run this test')
        return
    if not torch.cuda.device_count() > 1:
        warnings.warn('test_amp_single_gpu cannot run.'
                      ' Rerun on a node with 2+ GPUs to run this test')
        return

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(show_progress_bar=True,
                           max_nb_epochs=1,
                           gpus=1,
                           distributed_backend='dp',
                           use_amp=True)

    run_gpu_model_test(trainer_options, model, hparams)

def test_amp_gpu_ddp_slurm_managed():
    """Make sure DDP + AMP work."""
    if not can_run_gpu_test():
        return

    # simulate setting slurm flags
    os.environ['MASTER_PORT'] = str(np.random.randint(12000, 19000, 1)[0])
    os.environ['SLURM_LOCALID'] = str(0)

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(show_progress_bar=True,
                           max_nb_epochs=1,
                           gpus=[0],
                           distributed_backend='ddp',
                           use_amp=True)

    save_dir = init_save_dir()

    # exp file to get meta
    exp = get_exp(False)
    exp.argparse(hparams)
    exp.save()

    # exp file to get weights
    checkpoint = ModelCheckpoint(save_dir)

    # add these to the trainer options
    trainer_options['checkpoint_callback'] = checkpoint
    trainer_options['experiment'] = exp

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test root model address
    assert trainer.resolve_root_node_address('abc') == 'abc'
    assert trainer.resolve_root_node_address('abc[23]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'

    # test model loading with a map_location
    map_location = 'cuda:1'
    pretrained_model = load_model(exp, save_dir, True, map_location)

    # test model preds
    run_prediction(model.test_dataloader, pretrained_model)

    if trainer.use_ddp:
        # on hpc this would work fine... but need to hack it for the purpose of the test
        trainer.model = pretrained_model
        trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()

    # test HPC loading / saving
    trainer.hpc_save(save_dir, exp)
    trainer.hpc_load(save_dir, on_gpu=True)

    # test freeze on gpu
    model.freeze()
    model.unfreeze()

    clear_save_dir()

def test_model_freeze_unfreeze():
    """Verify freeze/unfreeze on a model."""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    model.freeze()
    model.unfreeze()

def test_cpu_slurm_save_load():
    """Verify model save/load/checkpoint on CPU."""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # exp file to get meta
    exp = get_exp(False)
    exp.argparse(hparams)
    exp.save()

    version = exp.version

    trainer_options = dict(max_nb_epochs=1,
                           experiment=exp,
                           checkpoint_callback=ModelCheckpoint(save_dir))

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    real_global_step = trainer.global_step

    # training complete
    assert result == 1, 'cpu model failed to complete'

    # predict with trained model before saving
    # make a prediction
    for batch in model.test_dataloader:
        break
    x, y = batch
    x = x.view(x.size(0), -1)

    model.eval()
    pred_before_saving = model(x)

    # test HPC saving
    # simulate snapshot on slurm
    saved_filepath = trainer.hpc_save(save_dir, exp)
    assert os.path.exists(saved_filepath)

    # new exp file to get meta
    exp = get_exp(False, version=version)
    exp.argparse(hparams)
    exp.save()

    trainer_options = dict(
        max_nb_epochs=1,
        experiment=exp,
        checkpoint_callback=ModelCheckpoint(save_dir),
    )
    trainer = Trainer(**trainer_options)
    model = LightningTestModel(hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_pred_same():
        assert trainer.global_step == real_global_step and trainer.global_step > 0

        # predict with loaded model to make sure answers are the same
        trainer.model.eval()
        new_pred = trainer.model(x)
        assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1

    model.on_epoch_start = assert_pred_same

    # by calling fit again, we trigger training, loading weights from the cluster
    # and our hook to predict using current model before any more weight updates
    trainer.fit(model)

    clear_save_dir()

def test_cpu_restore_training():
    """Verify continue training session on CPU."""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # exp file to get meta
    test_exp_version = 10
    exp = get_exp(False, version=test_exp_version)
    exp.argparse(hparams)
    exp.save()

    trainer_options = dict(max_nb_epochs=2,
                           val_check_interval=0.50,
                           val_percent_check=0.2,
                           train_percent_check=0.2,
                           experiment=exp,
                           checkpoint_callback=ModelCheckpoint(save_dir))

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    real_global_epoch = trainer.current_epoch

    # training complete
    assert result == 1, 'cpu model failed to complete'

    # wipe-out trainer and model
    # retrain with not much data... this simulates picking training back up after slurm
    # we want to see if the weights come back correctly
    new_exp = get_exp(False, version=test_exp_version)
    trainer_options = dict(
        max_nb_epochs=2,
        val_check_interval=0.50,
        val_percent_check=0.2,
        train_percent_check=0.2,
        experiment=new_exp,
        checkpoint_callback=ModelCheckpoint(save_dir),
    )
    trainer = Trainer(**trainer_options)
    model = LightningTestModel(hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_good_acc():
        assert trainer.current_epoch == real_global_epoch and trainer.current_epoch > 0

        # if model and state loaded correctly, predictions will be good even though we
        # haven't trained with the new loaded model
        trainer.model.eval()
        _ = [run_prediction(dataloader, trainer.model) for dataloader in trainer.val_dataloader]

    model.on_sanity_check_start = assert_good_acc

    # by calling fit again, we trigger training, loading weights from the cluster
    # and our hook to predict using current model before any more weight updates
    trainer.fit(model)

    clear_save_dir()

def test_amp_gpu_ddp_slurm_managed():
    """Make sure DDP + AMP work."""
    if not can_run_gpu_test():
        return

    reset_seed()

    # simulate setting slurm flags
    set_random_master_port()
    os.environ['SLURM_LOCALID'] = str(0)

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(show_progress_bar=True,
                           max_nb_epochs=1,
                           gpus=[0],
                           distributed_backend='ddp',
                           use_amp=True)

    save_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)

    # logger file to get weights
    checkpoint = init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['checkpoint_callback'] = checkpoint
    trainer_options['logger'] = logger

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test root model address
    assert trainer.resolve_root_node_address('abc') == 'abc'
    assert trainer.resolve_root_node_address('abc[23]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'

    # test model loading with a map_location
    pretrained_model = load_model(logger.experiment, trainer.checkpoint_callback.filepath)

    # test model preds
    [run_prediction(dataloader, pretrained_model) for dataloader in trainer.get_test_dataloaders()]

    if trainer.use_ddp:
        # on hpc this would work fine... but need to hack it for the purpose of the test
        trainer.model = pretrained_model
        trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()

    # test HPC loading / saving
    trainer.hpc_save(save_dir, logger)
    trainer.hpc_load(save_dir, on_gpu=True)

    # test freeze on gpu
    model.freeze()
    model.unfreeze()

    clear_save_dir()

def test_cpu_slurm_save_load():
    """Verify model save/load/checkpoint on CPU."""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # exp file to get meta
    exp = get_exp(False)
    exp.argparse(hparams)
    exp.save()

    cluster_a = SlurmCluster()
    trainer_options = dict(
        max_nb_epochs=1,
        cluster=cluster_a,
        experiment=exp,
        checkpoint_callback=ModelCheckpoint(save_dir)
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    real_global_step = trainer.global_step

    # training complete
    assert result == 1, 'cpu model failed to complete'

    # predict with trained model before saving
    # make a prediction
    for batch in model.test_dataloader:
        break
    x, y = batch
    x = x.view(x.size(0), -1)

    model.eval()
    pred_before_saving = model(x)

    # test registering a save function
    trainer.enable_auto_hpc_walltime_manager()

    # test HPC saving
    # simulate snapshot on slurm
    saved_filepath = trainer.hpc_save(save_dir, exp)
    assert os.path.exists(saved_filepath)

    # wipe-out trainer and model
    # retrain with not much data... this simulates picking training back up after slurm
    # we want to see if the weights come back correctly
    continue_tng_hparams = get_hparams(continue_training=True,
                                       hpc_exp_number=cluster_a.hpc_exp_number)
    trainer_options = dict(
        max_nb_epochs=1,
        cluster=SlurmCluster(continue_tng_hparams),
        experiment=exp,
        checkpoint_callback=ModelCheckpoint(save_dir),
    )
    trainer = Trainer(**trainer_options)
    model = LightningTestModel(hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_pred_same():
        assert trainer.global_step == real_global_step and trainer.global_step > 0

        # predict with loaded model to make sure answers are the same
        trainer.model.eval()
        new_pred = trainer.model(x)
        assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1

    model.on_epoch_start = assert_pred_same

    # by calling fit again, we trigger training, loading weights from the cluster
    # and our hook to predict using current model before any more weight updates
    trainer.fit(model)

    clear_save_dir()

def test_dp_resume():
    """Make sure DP continues training correctly."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=2,
        gpus=2,
        distributed_backend='dp',
    )

    save_dir = tutils.init_save_dir()

    # get logger
    logger = tutils.get_test_tube_logger(debug=False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['logger'] = logger
    trainer_options['checkpoint_callback'] = checkpoint

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # track epoch before saving
    real_global_epoch = trainer.current_epoch

    # correct result and ok accuracy
    assert result == 1, 'dp model failed to complete'

    # ---------------------------
    # HPC LOAD/SAVE
    # ---------------------------
    # save
    trainer.hpc_save(save_dir, logger)

    # init new trainer
    new_logger = tutils.get_test_tube_logger(version=logger.version)
    trainer_options['logger'] = new_logger
    trainer_options['checkpoint_callback'] = ModelCheckpoint(save_dir)
    trainer_options['train_percent_check'] = 0.2
    trainer_options['val_percent_check'] = 0.2
    trainer_options['max_nb_epochs'] = 1
    new_trainer = Trainer(**trainer_options)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_good_acc():
        assert new_trainer.current_epoch == real_global_epoch and new_trainer.current_epoch > 0

        # if model and state loaded correctly, predictions will be good even though we
        # haven't trained with the new loaded model
        dp_model = new_trainer.model
        dp_model.eval()

        dataloader = trainer.get_train_dataloader()
        tutils.run_prediction(dataloader, dp_model, dp=True)

    # new model
    model = LightningTestModel(hparams)
    model.on_sanity_check_start = assert_good_acc

    # fit new model which should load hpc weights
    new_trainer.fit(model)

    # test freeze on gpu
    model.freeze()
    model.unfreeze()

    tutils.clear_save_dir()

def main():
    """Make sure DP continues training correctly."""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=4,
        gpus=2,
        distributed_backend='dp',
    )

    save_dir = init_save_dir()

    # exp file to get meta
    exp = get_exp(False)
    exp.argparse(hparams)
    exp.save()

    # exp file to get weights
    checkpoint = ModelCheckpoint(save_dir)

    # add these to the trainer options
    trainer_options['experiment'] = exp
    trainer_options['checkpoint_callback'] = checkpoint

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # track epoch before saving
    real_global_epoch = trainer.current_epoch

    # correct result and ok accuracy
    assert result == 1, 'dp model failed to complete'

    # ---------------------------
    # HPC LOAD/SAVE
    # ---------------------------
    # save
    trainer.hpc_save(save_dir, exp)

    # init new trainer
    new_exp = get_exp(False, version=exp.version)
    trainer_options['experiment'] = new_exp
    trainer_options['checkpoint_callback'] = ModelCheckpoint(save_dir)
    trainer_options['train_percent_check'] = 0.2
    trainer_options['val_percent_check'] = 0.2
    trainer_options['max_nb_epochs'] = 1
    new_trainer = Trainer(**trainer_options)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_good_acc():
        assert trainer.current_epoch == real_global_epoch and trainer.current_epoch > 0

        # if model and state loaded correctly, predictions will be good even though we
        # haven't trained with the new loaded model
        dp_model = new_trainer.model
        dp_model.eval()

        _ = [run_prediction(dataloader, dp_model, dp=True) for dataloader in trainer.val_dataloader]

    # new model
    model = LightningTestModel(hparams)
    model.on_sanity_check_start = assert_good_acc

    # fit new model which should load hpc weights
    new_trainer.fit(model)

    # test freeze on gpu
    model.freeze()
    model.unfreeze()

    clear_save_dir()

def test_model_checkpoint_options(tmp_path):
    """Test ModelCheckpoint options."""
    def mock_save_function(filepath):
        open(filepath, 'a').close()

    hparams = tutils.get_hparams()
    _ = LightningTestModel(hparams)

    # simulated losses
    save_dir = tmp_path / "1"
    save_dir.mkdir()
    losses = [10, 9, 2.8, 5, 2.5]

    # -----------------
    # CASE K=-1 (all)
    w = ModelCheckpoint(save_dir, save_top_k=-1, verbose=1)
    w.save_function = mock_save_function
    for i, loss in enumerate(losses):
        w.on_epoch_end(i, logs={'val_loss': loss})

    file_lists = set(os.listdir(save_dir))

    assert len(file_lists) == len(losses), "Should save all models when save_top_k=-1"

    # verify correct naming
    for i in range(0, len(losses)):
        assert f'_ckpt_epoch_{i}.ckpt' in file_lists

    save_dir = tmp_path / "2"
    save_dir.mkdir()

    # -----------------
    # CASE K=0 (none)
    w = ModelCheckpoint(save_dir, save_top_k=0, verbose=1)
    w.save_function = mock_save_function
    for i, loss in enumerate(losses):
        w.on_epoch_end(i, logs={'val_loss': loss})

    file_lists = os.listdir(save_dir)

    assert len(file_lists) == 0, "Should save 0 models when save_top_k=0"

    save_dir = tmp_path / "3"
    save_dir.mkdir()

    # -----------------
    # CASE K=1 (2.5, epoch 4)
    w = ModelCheckpoint(save_dir, save_top_k=1, verbose=1, prefix='test_prefix')
    w.save_function = mock_save_function
    for i, loss in enumerate(losses):
        w.on_epoch_end(i, logs={'val_loss': loss})

    file_lists = set(os.listdir(save_dir))

    assert len(file_lists) == 1, "Should save 1 model when save_top_k=1"
    assert 'test_prefix_ckpt_epoch_4.ckpt' in file_lists

    save_dir = tmp_path / "4"
    save_dir.mkdir()

    # -----------------
    # CASE K=2 (2.5 epoch 4, 2.8 epoch 2)
    # make sure other files don't get deleted
    w = ModelCheckpoint(save_dir, save_top_k=2, verbose=1)
    open(f'{save_dir}/other_file.ckpt', 'a').close()
    w.save_function = mock_save_function
    for i, loss in enumerate(losses):
        w.on_epoch_end(i, logs={'val_loss': loss})

    file_lists = set(os.listdir(save_dir))

    assert len(file_lists) == 3, 'Should save 2 models when save_top_k=2'
    assert '_ckpt_epoch_4.ckpt' in file_lists
    assert '_ckpt_epoch_2.ckpt' in file_lists
    assert 'other_file.ckpt' in file_lists

    save_dir = tmp_path / "5"
    save_dir.mkdir()

    # -----------------
    # CASE K=4 (save all 4 models)
    # multiple checkpoints within same epoch
    w = ModelCheckpoint(save_dir, save_top_k=4, verbose=1)
    w.save_function = mock_save_function
    for loss in losses:
        w.on_epoch_end(0, logs={'val_loss': loss})

    file_lists = set(os.listdir(save_dir))

    assert len(file_lists) == 4, 'Should save all 4 models when save_top_k=4 within same epoch'

    save_dir = tmp_path / "6"
    save_dir.mkdir()

    # -----------------
    # CASE K=3 (save the 2nd, 3rd, 4th model)
    # multiple checkpoints within same epoch
    w = ModelCheckpoint(save_dir, save_top_k=3, verbose=1)
    w.save_function = mock_save_function
    for loss in losses:
        w.on_epoch_end(0, logs={'val_loss': loss})

    file_lists = set(os.listdir(save_dir))

    assert len(file_lists) == 3, 'Should save 3 models when save_top_k=3'
    assert '_ckpt_epoch_0_v2.ckpt' in file_lists
    assert '_ckpt_epoch_0_v1.ckpt' in file_lists
    assert '_ckpt_epoch_0.ckpt' in file_lists

def test_gradient_accumulation_scheduling(tmpdir):
    """Test grad accumulation by the freq of optimizer updates."""
    tutils.reset_seed()

    # test incorrect configs
    with pytest.raises(IndexError):
        assert Trainer(accumulate_grad_batches={0: 3, 1: 4, 4: 6})
        assert Trainer(accumulate_grad_batches={-2: 3})

    with pytest.raises(TypeError):
        assert Trainer(accumulate_grad_batches={})
        assert Trainer(accumulate_grad_batches=[[2, 3], [4, 6]])
        assert Trainer(accumulate_grad_batches={1: 2, 3.: 4})
        assert Trainer(accumulate_grad_batches={1: 2.5, 3: 5})

    # test optimizer call freq matches scheduler
    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                       second_order_closure=None):
        # only test the first 12 batches in epoch
        if batch_idx < 12:
            if epoch == 0:
                # reset counter when starting epoch
                if batch_idx == 0:
                    self.prev_called_batch_idx = 0

                    # use this opportunity to test once
                    assert self.trainer.accumulate_grad_batches == 1

                assert batch_idx == self.prev_called_batch_idx
                self.prev_called_batch_idx += 1

            elif 1 <= epoch <= 2:
                # reset counter when starting epoch
                if batch_idx == 1:
                    self.prev_called_batch_idx = 1

                    # use this opportunity to test once
                    assert self.trainer.accumulate_grad_batches == 2

                assert batch_idx == self.prev_called_batch_idx
                self.prev_called_batch_idx += 2

            else:
                if batch_idx == 3:
                    self.prev_called_batch_idx = 3

                    # use this opportunity to test once
                    assert self.trainer.accumulate_grad_batches == 4

                assert batch_idx == self.prev_called_batch_idx
                self.prev_called_batch_idx += 3

        optimizer.step()

        # clear gradients
        optimizer.zero_grad()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)
    schedule = {1: 2, 3: 4}

    trainer = Trainer(accumulate_grad_batches=schedule,
                      train_percent_check=0.1,
                      val_percent_check=0.1,
                      max_epochs=4,
                      default_save_path=tmpdir)

    # for the test
    trainer.optimizer_step = optimizer_step
    model.prev_called_batch_idx = 0

    trainer.fit(model)

def test_cpu_restore_training(tmpdir):
    """Verify continue training session on CPU."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # logger file to get meta
    test_logger_version = 10
    logger = tutils.get_test_tube_logger(tmpdir, False, version=test_logger_version)

    trainer_options = dict(max_num_epochs=2,
                           val_check_interval=0.50,
                           val_percent_check=0.2,
                           train_percent_check=0.2,
                           logger=logger,
                           checkpoint_callback=ModelCheckpoint(tmpdir))

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    real_global_epoch = trainer.current_epoch

    # training complete
    assert result == 1, 'cpu model failed to complete'

    # wipe-out trainer and model
    # retrain with not much data... this simulates picking training back up after slurm
    # we want to see if the weights come back correctly
    new_logger = tutils.get_test_tube_logger(tmpdir, False, version=test_logger_version)
    trainer_options = dict(
        max_num_epochs=2,
        val_check_interval=0.50,
        val_percent_check=0.2,
        train_percent_check=0.2,
        logger=new_logger,
        checkpoint_callback=ModelCheckpoint(tmpdir),
    )
    trainer = Trainer(**trainer_options)
    model = LightningTestModel(hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_good_acc():
        assert trainer.current_epoch == real_global_epoch
        assert trainer.current_epoch >= 0

        # if model and state loaded correctly, predictions will be good even though we
        # haven't trained with the new loaded model
        trainer.model.eval()
        for dataloader in trainer.get_val_dataloaders():
            tutils.run_prediction(dataloader, trainer.model)

    model.on_sanity_check_start = assert_good_acc

    # by calling fit again, we trigger training, loading weights from the cluster
    # and our hook to predict using current model before any more weight updates
    trainer.fit(model)