def test_dm_init_from_argparse_args(tmpdir):
    parser = ArgumentParser()
    parser = TrialMNISTDataModule.add_argparse_args(parser)
    args = parser.parse_args(['--data_dir', './my_data'])
    dm = TrialMNISTDataModule.from_argparse_args(args)
    dm.prepare_data()
    dm.setup()
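All of these snippets come from the same PyTorch Lightning test module, and the listing omits the shared imports. A rough reconstruction, with module paths inferred from how the names are used (the exact paths in the test suite may differ):

import os
import pickle
from argparse import ArgumentParser
from distutils.version import LooseVersion
from pathlib import Path

import torch
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint

# helpers shipped with the Lightning test suite (paths assumed)
import tests.base.develop_utils as tutils
from tests.base import EvalModelTemplate
from tests.base.datamodules import TrialMNISTDataModule
from tests.base.develop_utils import reset_seed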
Example #2
def test_result_obj_predictions_ddp_spawn(tmpdir):
    seed_everything(4321)

    distributed_backend = 'ddp_spawn'
    option = 0

    # must be set before CUDA initializes so exactly two GPUs are visible to the workers
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    dm = TrialMNISTDataModule(tmpdir)

    prediction_file = Path('predictions.pt')

    model = EvalModelTemplate(learning_rate=0.005)
    model.test_option = option
    model.prediction_file = prediction_file.as_posix()
    model.test_step = model.test_step_result_preds
    model.test_step_end = None
    model.test_epoch_end = None
    model.test_end = None

    prediction_files = [
        Path('predictions_rank_0.pt'),
        Path('predictions_rank_1.pt')
    ]
    for prediction_file in prediction_files:
        if prediction_file.exists():
            prediction_file.unlink()

    trainer = Trainer(default_root_dir=tmpdir,
                      max_epochs=3,
                      weights_summary=None,
                      deterministic=True,
                      distributed_backend=distributed_backend,
                      gpus=[0, 1])

    # The per-rank prediction files shouldn't exist yet because nothing has run
    for rank_file in prediction_files:
        assert not rank_file.exists()

    result = trainer.fit(model, dm)
    assert result == 1
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_loss'] < 0.6
    assert result['test_acc'] > 0.8

    # set up the test split in the main process so dm.mnist_test is available below
    dm.setup('test')

    # check prediction file now exists and is of expected length
    size = 0
    for prediction_file in prediction_files:
        assert prediction_file.exists()
        predictions = torch.load(prediction_file)
        size += len(predictions)
    assert size == len(dm.mnist_test)
def test_can_prepare_data(tmpdir):

    dm = TrialMNISTDataModule()
    trainer = Trainer()
    trainer.datamodule = dm

    # 1 no DM
    # prepare_data_per_node = True
    # local rank = 0   (True)
    trainer.prepare_data_per_node = True
    trainer.local_rank = 0
    assert trainer.data_connector.can_prepare_data()

    # local rank = 1   (False)
    trainer.local_rank = 1
    assert not trainer.data_connector.can_prepare_data()

    # prepare_data_per_node = False (prepare across all nodes)
    # global rank = 0   (True)
    trainer.prepare_data_per_node = False
    trainer.node_rank = 0
    trainer.local_rank = 0
    assert trainer.data_connector.can_prepare_data()

    # global rank = 1   (False)
    trainer.node_rank = 1
    trainer.local_rank = 0
    assert not trainer.data_connector.can_prepare_data()
    trainer.node_rank = 0
    trainer.local_rank = 1
    assert not trainer.data_connector.can_prepare_data()

    # 2 with a DM attached
    # prepare_data_per_node = True
    # local rank = 0 (True)
    trainer.prepare_data_per_node = True
    trainer.local_rank = 0

    # prepare_data is overridden and has already been called -> False
    dm._has_prepared_data = True
    assert not trainer.data_connector.can_prepare_data()

    # prepare_data is overridden but has not been called yet -> True
    dm._has_prepared_data = False
    assert trainer.data_connector.can_prepare_data()

    # prepare_data is not overridden -> True
    dm.prepare_data = None
    assert trainer.data_connector.can_prepare_data()
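The assertions above pin down the decision logic almost completely. A minimal sketch of what can_prepare_data presumably checks (reconstructed from the asserts, not Lightning's actual implementation; is_overridden is Lightning's helper for detecting overridden hooks, and its import path varies across versions):

from pytorch_lightning.utilities.model_utils import is_overridden

def can_prepare_data(trainer):
    if trainer.prepare_data_per_node:
        # each node prepares its own data: only local rank 0 may run it
        should_call = trainer.local_rank == 0
    else:
        # one global preparation: only global rank 0 (node 0, local 0) may run it
        should_call = trainer.node_rank == 0 and trainer.local_rank == 0

    dm = trainer.datamodule
    if dm is not None and is_overridden('prepare_data', dm):
        # skip if the datamodule has already prepared its data
        should_call = should_call and not dm._has_prepared_data
    return should_call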
def test_full_loop_ddp_spawn(tmpdir):
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    seed_everything(1234)

    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=5,
        weights_summary=None,
        distributed_backend='ddp_spawn',
        gpus=[0, 1],
        deterministic=True,
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_acc'] > 0.8
def test_data_hooks_called_with_stage_kwarg(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    assert dm.has_prepared_data is True

    dm.setup(stage='fit')
    assert dm.has_setup_fit is True
    assert dm.has_setup_test is False

    dm.setup(stage='test')
    assert dm.has_setup_fit is True
    assert dm.has_setup_test is True
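The stage keyword lets setup build only what a given stage needs. A minimal, self-contained sketch of that dispatch pattern (ToyDataModule is illustrative, not the TrialMNISTDataModule used above):

import torch
from torch.utils.data import TensorDataset
from pytorch_lightning import LightningDataModule

class ToyDataModule(LightningDataModule):
    def prepare_data(self):
        # one-time, single-process work (downloads, preprocessing) goes here
        pass

    def setup(self, stage=None):
        # runs on every process; build only what the requested stage needs
        if stage == 'fit' or stage is None:
            self.train_set = TensorDataset(torch.randn(64, 32))
        if stage == 'test' or stage is None:
            self.test_set = TensorDataset(torch.randn(16, 32))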
def test_torchscript_properties(modelclass):
    """ Test that scripted LightningModule has unnecessary methods removed. """
    model = modelclass()
    model.datamodule = TrialMNISTDataModule()
    script = model.to_torchscript()
    assert not hasattr(script, "datamodule")
    assert not hasattr(model, "batch_size") or hasattr(script, "batch_size")
    assert not hasattr(model, "learning_rate") or hasattr(script, "learning_rate")

    if LooseVersion(torch.__version__) >= LooseVersion("1.4.0"):
        # only on torch >= 1.4 do these unused methods get removed
        assert not callable(getattr(script, "training_step", None))
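Once scripted, the module stands alone; a short usage sketch saving and reloading it with plain TorchScript, independent of Lightning:

script = model.to_torchscript()
torch.jit.save(script, "model.pt")
loaded = torch.jit.load("model.pt")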
Example #7
def test_full_loop_ddp_spawn(tmpdir):
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    dm = TrialMNISTDataModule(tmpdir)
    dm.prepare_data()
    dm.setup()

    model = EvalModelTemplate()

    trainer = Trainer(default_root_dir=tmpdir,
                      max_epochs=3,
                      weights_summary=None,
                      distributed_backend='ddp_spawn',
                      gpus=[0, 1])
    # fit model
    result = trainer.fit(model, dm)
    assert result == 1

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_acc'] > 0.8
def test_test_loop_only(tmpdir):
    reset_seed()

    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
    )
    trainer.test(model, datamodule=dm)
def test_dm_checkpoint_save(tmpdir):
    reset_seed()

    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        checkpoint_callback=ModelCheckpoint(monitor='early_stop_on'))

    # fit model
    result = trainer.fit(model, dm)
    checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
    checkpoint = torch.load(checkpoint_path)
    assert dm.__class__.__name__ in checkpoint
    assert checkpoint[dm.__class__.__name__] == dm.__class__.__name__
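The last two assertions imply that TrialMNISTDataModule writes its own class name into the checkpoint dict. A sketch of a hook that would produce exactly that entry (assuming this Lightning version calls on_save_checkpoint on the attached datamodule):

from pytorch_lightning import LightningDataModule

class MarkerDataModule(LightningDataModule):
    def on_save_checkpoint(self, checkpoint):
        # store a marker under the class name; the test above asserts this entry
        checkpoint[self.__class__.__name__] = self.__class__.__name__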
Example #10
def test_data_hooks_called_verbose(tmpdir):
    dm = TrialMNISTDataModule()
    assert dm.has_prepared_data is False
    assert dm.has_setup_fit is False
    assert dm.has_setup_test is False

    dm.prepare_data()
    assert dm.has_prepared_data is True
    assert dm.has_setup_fit is False
    assert dm.has_setup_test is False

    dm.setup('fit')
    assert dm.has_prepared_data is True
    assert dm.has_setup_fit is True
    assert dm.has_setup_test is False

    dm.setup('test')
    assert dm.has_prepared_data is True
    assert dm.has_setup_fit is True
    assert dm.has_setup_test is True
Example #11
def test_result_obj_predictions(tmpdir, test_option, do_train, gpus):
    tutils.reset_seed()

    dm = TrialMNISTDataModule(tmpdir)
    prediction_file = Path(tmpdir) / 'predictions.pt'

    model = EvalModelTemplate()
    model.test_option = test_option
    model.prediction_file = prediction_file.as_posix()
    model.test_step = model.test_step_result_preds
    model.test_step_end = None
    model.test_epoch_end = None
    model.test_end = None

    if prediction_file.exists():
        prediction_file.unlink()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        deterministic=True,
        gpus=gpus
    )

    # Prediction file shouldn't exist yet because we haven't done anything
    assert not prediction_file.exists()

    if do_train:
        result = trainer.fit(model, dm)
        assert result == 1
        result = trainer.test(datamodule=dm)
        result = result[0]
        assert result['test_loss'] < 0.6
        assert result['test_acc'] > 0.8
    else:
        result = trainer.test(model, datamodule=dm)

    # check prediction file now exists and is of expected length
    assert prediction_file.exists()
    predictions = torch.load(prediction_file)
    assert len(predictions) == len(dm.mnist_test)
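The extra test_option, do_train, and gpus arguments point to a pytest parametrization above this function. The listing doesn't show the value grid, so the one below is purely an assumption:

import pytest

@pytest.mark.parametrize('test_option', [0, 1])
@pytest.mark.parametrize('do_train', [True, False])
@pytest.mark.parametrize('gpus', [None, 1])
def test_result_obj_predictions(tmpdir, test_option, do_train, gpus):
    ...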
Example #12
def test_train_val_loop_only(tmpdir):
    reset_seed()

    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()
    model.validation_step = None
    model.validation_step_end = None
    model.validation_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1
    assert trainer.logger_connector.callback_metrics['loss'] < 0.6
Example #13
def test_full_loop_single_gpu(tmpdir):
    reset_seed()

    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()

    trainer = Trainer(default_root_dir=tmpdir,
                      max_epochs=3,
                      weights_summary=None,
                      gpus=1)

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_acc'] > 0.8
Example #14
def test_datamodule_parameter(tmpdir):
    """ Test that the datamodule parameter works """

    # trial datamodule
    dm = TrialMNISTDataModule(tmpdir)

    hparams = EvalModelTemplate.get_default_hparams()
    model = EvalModelTemplate(**hparams)

    before_lr = hparams.get('learning_rate')
    # logger file to get meta
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
    )

    lrfinder = trainer.tuner.lr_find(model, datamodule=dm)
    after_lr = lrfinder.suggestion()
    model.learning_rate = after_lr

    assert before_lr != after_lr, \
        'Learning rate was not altered after running learning rate finder'
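Beyond suggestion(), the lr finder object returned by lr_find can be inspected directly; a typical follow-up (plotting requires matplotlib):

lr_finder = trainer.tuner.lr_find(model, datamodule=dm)
fig = lr_finder.plot(suggest=True)   # loss-vs-lr curve with the suggested point marked
model.learning_rate = lr_finder.suggestion()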
Example #15
def test_trainer_attached_to_dm(tmpdir):
    reset_seed()

    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        deterministic=True,
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1
    assert dm.trainer is not None

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert dm.trainer is not None
Example #16
def test_full_loop_dp(tmpdir):
    reset_seed()

    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        distributed_backend='dp',
        gpus=2,
        deterministic=True,
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_acc'] > 0.8
Example #17
def test_train_val_loop_only(tmpdir):
    dm = TrialMNISTDataModule(tmpdir)
    dm.prepare_data()
    dm.setup()

    model = EvalModelTemplate()
    model.validation_step = None
    model.validation_step_end = None
    model.validation_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
    )
    # fit model
    result = trainer.fit(model, dm)
    assert result == 1
    assert trainer.callback_metrics['loss'] < 0.50
Example #18
def test_full_loop(tmpdir):
    dm = TrialMNISTDataModule(tmpdir)
    dm.prepare_data()
    dm.setup()

    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
    )
    # fit model
    result = trainer.fit(model, dm)
    assert result == 1

    # test
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_acc'] > 0.8
Example #19
def test_dm_add_argparse_args(tmpdir):
    parser = ArgumentParser()
    parser = TrialMNISTDataModule.add_argparse_args(parser)
    args = parser.parse_args(['--data_dir', './my_data'])
    assert args.data_dir == './my_data'
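add_argparse_args derives the --data_dir flag from the datamodule's constructor signature: typed keyword arguments become CLI options. A hypothetical constructor that would yield that flag (the real TrialMNISTDataModule signature may differ):

from pytorch_lightning import LightningDataModule

class TrialMNISTDataModule(LightningDataModule):
    def __init__(self, data_dir: str = './data', batch_size: int = 32):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size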
Example #20
def test_base_datamodule_with_verbose_setup(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    dm.setup('fit')
    dm.setup('test')
Example #21
def test_base_datamodule(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    dm.setup()
Example #22
def test_dm_pickle_after_setup(tmpdir):
    dm = TrialMNISTDataModule()
    dm.prepare_data()
    dm.setup()
    pickle.dumps(dm)
Example #23
def test_dm_pickle_after_init(tmpdir):
    dm = TrialMNISTDataModule()
    pickle.dumps(dm)
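Pickleability is what ddp_spawn relies on when it ships the datamodule to worker processes, which is why both the fresh and the set-up module are exercised. A round-trip check along the same lines:

clone = pickle.loads(pickle.dumps(dm))
assert isinstance(clone, TrialMNISTDataModule)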