Code Example #1
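Runs the Result-object variants of the training/validation/test step hooks of EvalModelTemplate end to end on 8 TPU cores through the shared run_model_test pipeline, with early stopping enabled. The imports that the snippets in this listing rely on are sketched after this example.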
def test_result_obj_on_tpu(tmpdir):
    seed_everything(1234)

    batches = 5
    epochs = 2

    model = EvalModelTemplate()
    model.training_step = model.training_step_result_obj
    model.training_step_end = None
    model.training_epoch_end = None
    model.validation_step = model.validation_step_result_obj
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_step = model.test_step_result_obj
    model.test_step_end = None
    model.test_epoch_end = None

    trainer_options = dict(default_root_dir=tmpdir,
                           max_epochs=epochs,
                           callbacks=[EarlyStopping()],
                           log_every_n_steps=2,
                           limit_train_batches=batches,
                           weights_summary=None,
                           tpu_cores=8)

    tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
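All of these snippets come from PyTorch Lightning's test suite and omit their imports. A plausible import block, assuming the tests.base helper layout of the 0.9/1.0-era repository (the exact module paths are assumptions; CustomMNISTDataModule in Code Example #7 is defined in the test file itself rather than imported):

import os
from pathlib import Path

import pytest
import torch

from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.utilities.exceptions import MisconfigurationException

# test-suite helpers; paths assume the pytorch-lightning repo layout
import tests.base.develop_pipelines as tpipes
import tests.base.develop_utils as tutils
from tests.base import EvalModelTemplate
from tests.base.datamodules import TrialMNISTDataModule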
Code Example #2
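An older variant of the same TPU test, written against the pre-1.0 Trainer API: early_stop_callback=True and row_log_interval were later replaced by callbacks=[EarlyStopping()] and log_every_n_steps, as in Code Example #1. It also enables the PL_DEV_DEBUG environment flag.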
def test_result_obj_on_tpu(tmpdir):
    seed_everything(1234)
    os.environ['PL_DEV_DEBUG'] = '1'

    batches = 5
    epochs = 2

    model = EvalModelTemplate()
    model.training_step = model.training_step_result_obj
    model.training_step_end = None
    model.training_epoch_end = None
    model.validation_step = model.validation_step_result_obj
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_step = model.test_step_result_obj
    model.test_step_end = None
    model.test_epoch_end = None

    trainer_options = dict(default_root_dir=tmpdir,
                           max_epochs=epochs,
                           early_stop_callback=True,
                           row_log_interval=2,
                           limit_train_batches=batches,
                           weights_summary=None,
                           tpu_cores=8)

    tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
Code Example #3
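Trains on two GPUs with the ddp_spawn backend and verifies that each rank writes its own prediction file (predictions_rank_0.pt, predictions_rank_1.pt) and that the combined number of predictions matches the size of the test set. A rough reconstruction of the test_step_result_preds hook follows the snippet.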
def test_result_obj_predictions_ddp_spawn(tmpdir):
    seed_everything(4321)

    distributed_backend = 'ddp_spawn'
    option = 0

    # restrict the run to two visible GPUs for ddp_spawn
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    dm = TrialMNISTDataModule(tmpdir)

    prediction_file = Path('predictions.pt')

    model = EvalModelTemplate(learning_rate=0.005)
    model.test_option = option
    model.prediction_file = prediction_file.as_posix()
    model.test_step = model.test_step_result_preds
    model.test_step_end = None
    model.test_epoch_end = None
    model.test_end = None

    prediction_files = [
        Path('predictions_rank_0.pt'),
        Path('predictions_rank_1.pt')
    ]
    for prediction_file in prediction_files:
        if prediction_file.exists():
            prediction_file.unlink()

    trainer = Trainer(default_root_dir=tmpdir,
                      max_epochs=3,
                      weights_summary=None,
                      deterministic=True,
                      distributed_backend=distributed_backend,
                      gpus=[0, 1])

    # Prediction files shouldn't exist yet because we haven't done anything
    for prediction_file in prediction_files:
        assert not prediction_file.exists()

    result = trainer.fit(model, dm)
    assert result == 1
    result = trainer.test(datamodule=dm)
    result = result[0]
    assert result['test_loss'] < 0.6
    assert result['test_acc'] > 0.8

    dm.setup('test')

    # check prediction file now exists and is of expected length
    size = 0
    for prediction_file in prediction_files:
        assert prediction_file.exists()
        predictions = torch.load(prediction_file)
        size += len(predictions)
    assert size == len(dm.mnist_test)
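For context, the test_step_result_preds hook assigned above wrote predictions through the since-removed EvalResult API. A rough reconstruction; the body details and the exact EvalResult.write signature are assumptions:

def test_step_result_preds(self, batch, batch_idx, optimizer_idx=None):
    x, y = batch
    x = x.view(x.size(0), -1)
    y_hat = self(x)
    loss_test = self.loss(y, y_hat)

    labels_hat = torch.argmax(y_hat, dim=1)
    test_acc = torch.sum(y == labels_hat).float() / len(y)

    result = EvalResult(checkpoint_on=loss_test)
    result.log('test_loss', loss_test)
    result.log('test_acc', test_acc)
    # persist per-sample predictions; under ddp_spawn each rank writes
    # its own predictions_rank_<rank>.pt file
    result.write('preds', labels_hat, self.prediction_file)
    return result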
Code Example #4
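Exercises the full train/val/test loop with the dp backend on two GPUs and then uses the dev debugger (enabled via PL_DEV_DEBUG) to check that every step-level, step-end-level, and epoch-end-level metric was logged along the way.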
def test_val_step_full_loop_result_dp(tmpdir):
    # TODO: finish the full train, val, test loop with dp
    os.environ['PL_DEV_DEBUG'] = '1'

    batches = 10
    epochs = 3

    model = EvalModelTemplate()
    model.training_step = model.training_step_full_loop_result_obj_dp
    model.training_step_end = model.training_step_end_full_loop_result_obj_dp
    model.training_epoch_end = model.training_epoch_end_full_loop_result_obj_dp
    model.validation_step = model.eval_step_full_loop_result_obj_dp
    model.validation_step_end = model.eval_step_end_full_loop_result_obj_dp
    model.validation_epoch_end = model.eval_epoch_end_full_loop_result_obj_dp
    model.test_step = model.eval_step_full_loop_result_obj_dp
    model.test_step_end = model.eval_step_end_full_loop_result_obj_dp
    model.test_epoch_end = model.eval_epoch_end_full_loop_result_obj_dp

    trainer = Trainer(
        default_root_dir=tmpdir,
        distributed_backend='dp',
        gpus=[0, 1],
        max_epochs=epochs,
        callbacks=[EarlyStopping()],
        log_every_n_steps=2,
        limit_train_batches=batches,
        weights_summary=None,
    )

    trainer.fit(model)

    results = trainer.test()

    # assert we returned all metrics requested
    assert len(results) == 1
    results = results[0]
    assert 'test_epoch_end_metric' in results

    # make sure we saw all the correct keys along all paths
    seen_keys = set()
    for metric in trainer.dev_debugger.logged_metrics:
        seen_keys.update(metric.keys())

    assert 'train_step_metric' in seen_keys
    assert 'train_step_end_metric' in seen_keys
    assert 'train_epoch_end_metric_epoch' in seen_keys
    assert 'validation_step_metric_step/epoch_0' in seen_keys
    assert 'validation_step_metric_epoch' in seen_keys
    assert 'validation_step_end_metric' in seen_keys
    assert 'validation_epoch_end_metric' in seen_keys
    assert 'test_step_metric_step/epoch_2' in seen_keys
    assert 'test_step_metric_epoch' in seen_keys
    assert 'test_step_end_metric' in seen_keys
    assert 'test_epoch_end_metric' in seen_keys
Code Example #5
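Runs the full loop on CPU with Result-object hooks, checks the test metrics, confirms that the internal early_stop_on/checkpoint_on keys do not leak into the returned results, and verifies that a second trainer.test() call reproduces the same values.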
def test_full_loop_result_cpu(tmpdir):
    seed_everything(1234)
    os.environ['PL_DEV_DEBUG'] = '1'

    batches = 10
    epochs = 2

    model = EvalModelTemplate()
    model.training_step = model.training_step_result_obj
    model.training_step_end = None
    model.training_epoch_end = None
    model.validation_step = model.validation_step_result_obj
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_step = model.test_step_result_obj
    model.test_step_end = None
    model.test_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=epochs,
        callbacks=[EarlyStopping()],
        log_every_n_steps=2,
        limit_train_batches=batches,
        weights_summary=None,
    )

    trainer.fit(model)

    results = trainer.test()

    # assert we returned all metrics requested
    assert len(results) == 1
    results = results[0]
    assert results['test_loss'] < 0.3
    assert results['test_acc'] > 0.9
    assert len(results) == 2
    assert 'early_stop_on' not in results
    assert 'checkpoint_on' not in results

    results2 = trainer.test()[0]
    for k, v in results.items():
        assert results2[k] == v
Code Example #6
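A parametrized variant of the prediction test: it optionally trains first, then checks that the prediction file is written and holds one entry per test sample. The @pytest.mark.parametrize decorator was dropped from the listing; a hypothetical reconstruction follows the snippet.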
def test_result_obj_predictions(tmpdir, test_option, do_train, gpus):
    tutils.reset_seed()

    dm = TrialMNISTDataModule(tmpdir)
    prediction_file = Path(tmpdir) / 'predictions.pt'

    model = EvalModelTemplate()
    model.test_option = test_option
    model.prediction_file = prediction_file.as_posix()
    model.test_step = model.test_step_result_preds
    model.test_step_end = None
    model.test_epoch_end = None
    model.test_end = None

    if prediction_file.exists():
        prediction_file.unlink()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
        deterministic=True,
        gpus=gpus
    )

    # Prediction file shouldn't exist yet because we haven't done anything
    assert not prediction_file.exists()

    if do_train:
        result = trainer.fit(model, dm)
        assert result == 1
        result = trainer.test(datamodule=dm)
        result = result[0]
        assert result['test_loss'] < 0.6
        assert result['test_acc'] > 0.8
    else:
        result = trainer.test(model, datamodule=dm)

    # check prediction file now exists and is of expected length
    assert prediction_file.exists()
    predictions = torch.load(prediction_file)
    assert len(predictions) == len(dm.mnist_test)
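The function above receives test_option, do_train, and gpus from pytest parametrization. A hypothetical reconstruction of the decorator; the concrete value combinations are assumptions:

@pytest.mark.parametrize(['test_option', 'do_train', 'gpus'], [
    (0, True, None),   # train first, CPU
    (0, False, None),  # test only, CPU
    (1, True, 1),      # train first, single GPU
])
def test_result_obj_predictions(tmpdir, test_option, do_train, gpus):
    ...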
Code Example #7
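Fits a model through a datamodule with reload_dataloaders_every_epoch=True, so the train dataloader is requested again at every epoch. A sketch of a plausible CustomMNISTDataModule follows the snippet.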
def test_dm_reload_dataloaders_every_epoch(tmpdir):
    """Test datamodule, where trainer argument
    reload_dataloaders_every_epoch is set to True/False"""

    dm = CustomMNISTDataModule(tmpdir)

    model = EvalModelTemplate()
    model.validation_step = None
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_step = None
    model.test_step_end = None
    model.test_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        limit_train_batches=0.01,
        reload_dataloaders_every_epoch=True,
    )
    trainer.fit(model, dm)
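CustomMNISTDataModule is not a library class; in the original test file it is defined locally. A minimal hypothetical stand-in that makes the reloading observable by counting train_dataloader calls:

class CustomMNISTDataModule(TrialMNISTDataModule):

    def __init__(self, data_dir: str = './'):
        super().__init__(data_dir)
        self._train_dataloader_calls = 0

    def train_dataloader(self):
        # with reload_dataloaders_every_epoch=True this runs once per epoch
        self._train_dataloader_calls += 1
        return super().train_dataloader()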
Code Example #8
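Trains with all validation and test hooks disabled so that only the train loop runs, then asserts on the final loss recorded in the callback metrics.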
def test_train_loop_only(tmpdir):
    dm = TrialMNISTDataModule(tmpdir)

    model = EvalModelTemplate()
    model.validation_step = None
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_step = None
    model.test_step_end = None
    model.test_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        weights_summary=None,
    )

    # fit model
    result = trainer.fit(model, dm)
    assert result == 1
    assert trainer.logger_connector.callback_metrics['loss'] < 0.6
Code Example #9
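Checks Trainer.test() misconfiguration handling: it must raise when test_dataloader or test_step is missing, and must run cleanly when test_step is implemented, with or without test_step_end.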
def test_testpass_overrides(tmpdir):
    # todo: check duplicated tests against trainer_checks
    hparams = EvalModelTemplate.get_default_hparams()

    # Misconfiguration: test_dataloader is not implemented
    with pytest.raises(MisconfigurationException, match='.*not implement `test_dataloader`.*'):
        model = EvalModelTemplate(hparams)
        model.test_dataloader = LightningModule.test_dataloader
        Trainer().test(model)

    # Misconfiguration: test_step is not implemented
    with pytest.raises(MisconfigurationException):
        model = EvalModelTemplate(hparams)
        model.test_step = LightningModule.test_step
        Trainer().test(model)

    # No exception when test_step is implemented, with or without test_step_end
    model = EvalModelTemplate(hparams)
    model.test_step_end = LightningModule.test_step_end
    Trainer().test(model)

    model = EvalModelTemplate(hparams)
    Trainer().test(model)