def test_single_gpu_batch_parse():
    """Make sure nested batch structures are moved to the GPU."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    trainer = Trainer()

    # batch is just a tensor
    batch = torch.rand(2, 3)
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'

    # tensor list
    batch = [torch.rand(2, 3), torch.rand(2, 3)]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
    assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'

    # tensor list of lists
    batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
    assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'

    # tensor dict
    batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
    assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'

    # tuple of tensor list and list of tensor dict
    batch = ([torch.rand(2, 3) for _ in range(2)],
             [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
    assert batch[1][0]['a'].device.index == 0
    assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'
    assert batch[1][0]['b'].device.index == 0
    assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
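
# Illustrative only: a minimal sketch (not Lightning's actual `transfer_batch_to_gpu`)
# of the kind of recursive device transfer the test above exercises, assuming tensors
# may be nested in lists, tuples, and dicts while other objects pass through unchanged.
def _move_batch_to_device_sketch(batch, device):
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    if isinstance(batch, (list, tuple)):
        return type(batch)(_move_batch_to_device_sketch(x, device) for x in batch)
    if isinstance(batch, dict):
        return {k: _move_batch_to_device_sketch(v, device) for k, v in batch.items()}
    return batch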
def test_running_test_pretrained_model_ddp(tmpdir):
    """Verify `test()` on pretrained model."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # exp file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=False,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend='ddp'
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger,
                                         trainer.checkpoint_callback.filepath,
                                         module_class=LightningTestModel)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    dataloaders = model.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    for dataloader in dataloaders:
        tutils.run_prediction(dataloader, pretrained_model)
def test_multi_gpu_none_backend(tmpdir):
    """Make sure when using multiple GPUs the user can't use `distributed_backend = None`."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    model, hparams = tutils.get_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=False,
        max_epochs=1,
        train_percent_check=0.1,
        val_percent_check=0.1,
        gpus='-1'
    )

    with pytest.warns(UserWarning):
        tutils.run_model_test(trainer_options, model)
def test_amp_gpu_ddp_slurm_managed(tmpdir):
    """Make sure DDP + AMP work in a simulated SLURM-managed environment."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()

    # simulate setting slurm flags
    tutils.set_random_master_port()
    os.environ['SLURM_LOCALID'] = str(0)

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_epochs=1,
        gpus=[0],
        distributed_backend='ddp',
        precision=16
    )

    # exp file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['checkpoint_callback'] = checkpoint
    trainer_options['logger'] = logger

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test root model address
    assert trainer.resolve_root_node_address('abc') == 'abc'
    assert trainer.resolve_root_node_address('abc[23]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'
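
# Illustrative only: a hypothetical sketch of the node-name resolution that the
# `resolve_root_node_address` asserts above check, i.e. collapsing a SLURM node list
# such as 'abc[23-24, 45-40, 40]' to its first node 'abc23'. Not Lightning's code.
def _resolve_root_node_sketch(node_list):
    import re

    # grab everything before the bracket plus the first number inside it
    match = re.search(r'^(.*?)\[(\d+)', node_list)
    if match:
        return match.group(1) + match.group(2)
    return node_list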
def test_multi_gpu_model_ddp(tmpdir):
    """Make sure DDP works."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    model, hparams = tutils.get_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=False,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=[0, 1],
        distributed_backend='ddp'
    )

    tutils.run_model_test(trainer_options, model)
def test_amp_single_gpu(tmpdir):
    """Make sure AMP works on a single GPU."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=True,
        max_epochs=1,
        gpus=1,
        distributed_backend='ddp',
        precision=16
    )

    tutils.run_model_test(trainer_options, model)
def test_amp_gpu_dp(tmpdir):
    """Make sure DP + AMP work."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    model, hparams = tutils.get_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        max_epochs=1,
        gpus='0, 1',  # test init with gpu string
        distributed_backend='dp',
        precision=16
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1
def test_amp_gpu_ddp(tmpdir):
    """Make sure DDP + AMP work."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=True,
        max_epochs=1,
        gpus=2,
        distributed_backend='ddp',
        use_amp=True
    )

    tutils.run_model_test(trainer_options, model)
def test_multi_gpu_model_dp(tmpdir):
    """Make sure DP works."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    model, hparams = tutils.get_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=False,
        distributed_backend='dp',
        max_epochs=1,
        train_percent_check=0.1,
        val_percent_check=0.1,
        gpus='-1'
    )

    tutils.run_model_test(trainer_options, model)

    # test memory helper functions
    memory.get_memory_profile('min_max')
def test_no_amp_single_gpu(tmpdir):
    """Make sure DP + AMP work on a single GPU."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=True,
        max_epochs=1,
        gpus=1,
        distributed_backend='dp',
        use_amp=True
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1
def test_running_test_pretrained_model_dp(tmpdir):
    """Verify `test()` on pretrained model."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # logger file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=True,
        max_epochs=4,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend='dp'
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger,
                                         trainer.checkpoint_callback.filepath,
                                         module_class=LightningTestModel)

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)
def test_ddp_sampler_error(tmpdir):
    """Make sure removing the distributed sampler under DDP raises a warning."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams, force_remove_distributed_sampler=True)

    logger = tutils.get_test_tube_logger(tmpdir, True)

    trainer = Trainer(
        logger=logger,
        show_progress_bar=False,
        max_epochs=1,
        gpus=[0, 1],
        distributed_backend='ddp',
        use_amp=True
    )

    with pytest.warns(UserWarning):
        trainer.get_dataloaders(model)
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
    """Make sure DDP works with dataloaders passed to fit()."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    model, hparams = tutils.get_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=False,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=[0, 1],
        distributed_backend='ddp'
    )
    fit_options = dict(
        train_dataloader=model.train_dataloader(),
        val_dataloaders=model.val_dataloader()
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model, **fit_options)

    assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
def test_dp_resume(tmpdir):
    """Make sure DP continues training correctly."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_epochs=2,
        gpus=2,
        distributed_backend='dp',
    )

    # get logger
    logger = tutils.get_test_tube_logger(tmpdir, debug=False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['logger'] = logger
    trainer_options['checkpoint_callback'] = checkpoint

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # track epoch before saving. Increment since we finished the current epoch, don't want to rerun
    real_global_epoch = trainer.current_epoch + 1

    # correct result and ok accuracy
    assert result == 1, 'amp + dp model failed to complete'

    # ---------------------------
    # HPC LOAD/SAVE
    # ---------------------------
    # save
    trainer.hpc_save(tmpdir, logger)

    # init new trainer
    new_logger = tutils.get_test_tube_logger(tmpdir, version=logger.version)
    trainer_options['logger'] = new_logger
    trainer_options['checkpoint_callback'] = ModelCheckpoint(tmpdir)
    trainer_options['train_percent_check'] = 0.2
    trainer_options['val_percent_check'] = 0.2
    trainer_options['max_epochs'] = 1
    new_trainer = Trainer(**trainer_options)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_good_acc():
        assert new_trainer.current_epoch == real_global_epoch and new_trainer.current_epoch > 0

        # if model and state loaded correctly, predictions will be good even though we
        # haven't trained with the new loaded model
        dp_model = new_trainer.model
        dp_model.eval()

        dataloader = trainer.train_dataloader
        tutils.run_prediction(dataloader, dp_model, dp=True)

    # new model
    model = LightningTestModel(hparams)
    model.on_train_start = assert_good_acc

    # fit new model which should load hpc weights
    new_trainer.fit(model)

    # test freeze on gpu
    model.freeze()
    model.unfreeze()