def test_full_loop_ddp_spawn(tmpdir):
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    dm = TrialMNISTDataModule(tmpdir)
    dm.prepare_data()
    dm.setup()

    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        distributed_backend='ddp_spawn',
        gpus=[0, 1],
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_acc'] > 0.8
def test_result_obj_predictions_ddp_spawn(tmpdir):
    seed_everything(4321)

    distributed_backend = 'ddp_spawn'
    option = 0

    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    dm = TrialMNISTDataModule(tmpdir)
    prediction_file = Path('predictions.pt')

    model = EvalModelTemplate(learning_rate=0.005)
    model.test_option = option
    model.prediction_file = prediction_file.as_posix()
    model.test_step = model.test_step_result_preds
    model.test_step_end = None
    model.test_epoch_end = None
    model.test_end = None

    # each ddp_spawn rank writes its own prediction shard
    prediction_files = [
        Path('predictions_rank_0.pt'),
        Path('predictions_rank_1.pt'),
    ]
    for prediction_file in prediction_files:
        if prediction_file.exists():
            prediction_file.unlink()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        deterministic=True,
        distributed_backend=distributed_backend,
        gpus=[0, 1],
    )

    # prediction file shouldn't exist yet because we haven't done anything
    # assert not model.prediction_file.exists()

    result = trainer.fit(model, dm)
    assert result == 1

    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_loss'] < 0.6
    assert result['test_acc'] > 0.8

    dm.setup('test')

    # check prediction files now exist and together cover the whole test set
    size = 0
    for prediction_file in prediction_files:
        assert prediction_file.exists()
        predictions = torch.load(prediction_file)
        size += len(predictions)
    assert size == len(dm.mnist_test)
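# For reference, a minimal framework-agnostic sketch of the per-rank sharding
# that test_result_obj_predictions_ddp_spawn checks for. The helper name and
# file pattern below are assumptions for illustration; the real logic lives in
# EvalModelTemplate.test_step_result_preds, whose mechanism may differ.
def _sketch_write_rank_shard(predictions, rank):
    import torch

    # each ddp_spawn worker saves its own shard; the test then loads
    # predictions_rank_0.pt and predictions_rank_1.pt and sums their lengths
    torch.save(predictions, f'predictions_rank_{rank}.pt')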
def test_data_hooks_called_with_stage_kwarg(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    assert dm.has_prepared_data is True

    dm.setup(stage='fit')
    assert dm.has_setup_fit is True
    assert dm.has_setup_test is False

    dm.setup(stage='test')
    assert dm.has_setup_fit is True
    assert dm.has_setup_test is True
def test_data_hooks_called(tmpdir):
    dm = TrialMNISTDataModule()
    assert dm.has_prepared_data is False
    assert dm.has_setup_fit is False
    assert dm.has_setup_test is False

    dm.prepare_data()
    assert dm.has_prepared_data is True
    assert dm.has_setup_fit is False
    assert dm.has_setup_test is False

    dm.setup()
    assert dm.has_prepared_data is True
    assert dm.has_setup_fit is True
    assert dm.has_setup_test is True
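# For context, a minimal sketch of a datamodule that maintains the state flags
# asserted in the two hook tests above. Illustrative only: TrialMNISTDataModule
# inherits this bookkeeping from LightningDataModule, and the internals shown
# here are an assumption, not the library's implementation.
class _SketchDataModule:

    def __init__(self):
        self.has_prepared_data = False
        self.has_setup_fit = False
        self.has_setup_test = False

    def prepare_data(self):
        # one-time, single-process work (e.g. downloading MNIST)
        self.has_prepared_data = True

    def setup(self, stage=None):
        # stage=None prepares both splits, matching test_data_hooks_called;
        # stage='fit'/'test' flips only one flag, matching the stage-kwarg test
        if stage in (None, 'fit'):
            self.has_setup_fit = True
        if stage in (None, 'test'):
            self.has_setup_test = True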
def test_train_val_loop_only(tmpdir):
    dm = TrialMNISTDataModule(tmpdir)
    dm.prepare_data()
    dm.setup()

    model = EvalModelTemplate()
    model.validation_step = None
    model.validation_step_end = None
    model.validation_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1
    assert trainer.callback_metrics['loss'] < 0.50
def test_full_loop_single_gpu(tmpdir):
    dm = TrialMNISTDataModule(tmpdir)
    dm.prepare_data()
    dm.setup()

    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        gpus=1,
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_acc'] > 0.8
def test_base_datamodule_with_verbose_setup(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    dm.setup('fit')
    dm.setup('test')
def test_base_datamodule(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    dm.setup()
def test_dm_pickle_after_setup(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    dm.setup()
    pickle.dumps(dm)
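# Why the pickle check matters: ddp_spawn launches workers with the 'spawn'
# start method, which pickles whatever is handed to each child process, so a
# datamodule that cannot be pickled after setup() would break that backend.
# A rough illustrative round-trip (the helper name is hypothetical):
def _sketch_pickle_roundtrip(dm):
    payload = pickle.dumps(dm)    # what the parent process effectively does
    return pickle.loads(payload)  # what each spawned worker reconstructs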