Code example #1
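These snippets are excerpted from the PyTorch Lightning test suite (the 0.9/1.0-era dict-return API) and omit their imports. A minimal sketch of what each example assumes is below; the tests.base.deterministic_model import path is an assumption based on that repo's layout of the era.

import torch
from pytorch_lightning import Trainer

# DeterministicModel is a helper from the Lightning test suite; the import
# path below is an assumption based on the repo layout of that era.
from tests.base.deterministic_model import DeterministicModel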
def test_validation_step_arbitrary_dict_return(tmpdir):
    """
    Test that val step can return an arbitrary dict
    """
    model = DeterministicModel()
    model.training_step = model.training_step__dict_return
    model.validation_step = model.validation_step__dummy_dict_return
    model.validation_step_end = None
    model.validation_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        weights_summary=None,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
    )
    trainer.fit(model)

    # callback_metrics holds the results of the full loop
    # eval_results is the output of _evaluate
    callback_metrics, eval_results = trainer.run_evaluation()
    assert len(callback_metrics) == 1
    assert len(eval_results) == 2
    assert eval_results[0]['some'] == 171
    assert eval_results[1]['some'] == 171

    assert eval_results[0]['value'] == 'a'
    assert eval_results[1]['value'] == 'a'

    # make sure correct steps were called
    assert model.validation_step_called
    assert not model.validation_step_end_called
    assert not model.validation_epoch_end_called
Code example #2
def test_training_step_with_step_end(tmpdir):
    """
    Checks training_step + training_step_end
    """
    model = DeterministicModel()
    model.training_step = model.training_step__dict_return
    model.training_step_end = model.training_step_end__dict
    model.val_dataloader = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        weights_summary=None,
    )
    trainer.fit(model)

    # make sure correct steps were called
    assert model.training_step_called
    assert model.training_step_end_called
    assert not model.training_epoch_end_called

    # make sure training outputs what is expected
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break

    out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert trainer.logger_connector.logged_metrics['log_acc1'] == 14.0
    assert trainer.logger_connector.logged_metrics['log_acc2'] == 9.0

    train_step_end_out = out.training_step_output_for_epoch_end
    pbar_metrics = train_step_end_out['progress_bar']
    assert 'train_step_end' in train_step_end_out
    assert pbar_metrics['pbar_acc1'] == 19.0
    assert pbar_metrics['pbar_acc2'] == 21.0
Code example #3
def test_validation_step_scalar_return(tmpdir):
    """
    Test that val step can return a scalar
    """
    model = DeterministicModel()
    model.training_step = model.training_step__dict_return
    model.validation_step = model.validation_step__scalar_return
    model.validation_step_end = None
    model.validation_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        weights_summary=None,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
    )
    trainer.fit(model)

    # out holds the results of the full loop
    # eval_results is the output of _evaluate
    out, eval_results = trainer.run_evaluation()
    assert len(out) == 1
    assert len(eval_results) == 2
    assert eval_results[0] == 171 and eval_results[1] == 171

    # make sure correct steps were called
    assert model.validation_step_called
    assert not model.validation_step_end_called
    assert not model.validation_epoch_end_called
Code example #4
def test_val_step_step_end_no_return(tmpdir):
    """
    Test that val step + val step end work (with no return in val step end)
    """

    model = DeterministicModel()
    model.training_step = model.training_step__dict_return
    model.validation_step = model.validation_step__dict_return
    model.validation_step_end = model.validation_step_end__no_return
    model.validation_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        weights_summary=None,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
    )
    trainer.fit(model)

    # callback_metrics holds the results of the full loop
    # eval_results is the output of _evaluate
    callback_metrics, eval_results = trainer.run_evaluation()
    assert len(callback_metrics) == 1
    assert len(eval_results) == 0

    # make sure correct steps were called
    assert model.validation_step_called
    assert model.validation_step_end_called
    assert not model.validation_epoch_end_called
Code example #5
def test_training_step_scalar_with_step_end(tmpdir):
    """
    Checks training_step returning a scalar + training_step_end
    """
    model = DeterministicModel()
    model.training_step = model.training_step__scalar_return
    model.training_step_end = model.training_step_end__scalar
    model.val_dataloader = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        weights_summary=None,
    )
    trainer.fit(model)

    # make sure correct steps were called
    assert model.training_step_called
    assert model.training_step_end_called
    assert not model.training_epoch_end_called

    # make sure training outputs what is expected
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break

    out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert len(out.grad_norm_dic) == 0 and isinstance(out.grad_norm_dic, dict)

    train_step_out = out.training_step_output_for_epoch_end
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out, torch.Tensor)
    assert train_step_out.item() == 171

    # make sure the optimizer closure returns the correct things
    opt_closure_result = trainer.train_loop.training_step_and_backward(
        batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
    assert opt_closure_result['loss'].item() == 171
Code example #6
def test_full_training_loop_scalar(tmpdir):
    """
    Checks training_step + training_step_end + training_epoch_end
    (all with a scalar return from training_step)
    """

    model = DeterministicModel()
    model.training_step = model.training_step__scalar_return
    model.training_step_end = model.training_step_end__scalar
    model.training_epoch_end = model.training_epoch_end__scalar
    model.val_dataloader = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model)

    # make sure correct steps were called
    assert model.training_step_called
    assert model.training_step_end_called
    assert model.training_epoch_end_called

    # assert epoch end metrics were added
    assert len(trainer.logger_connector.callback_metrics) == 0
    assert len(trainer.logger_connector.progress_bar_metrics) == 0

    # make sure training outputs what is expected
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break

    out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert len(out.grad_norm_dic) == 0 and isinstance(out.grad_norm_dic, dict)

    train_step_out = out.training_step_output_for_epoch_end
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out['minimize'], torch.Tensor)
    assert train_step_out['minimize'].item() == 171

    # make sure the optimizer closure returns the correct things
    opt_closure_result = trainer.train_loop.training_step_and_backward(
        batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens
    )
    assert opt_closure_result['loss'].item() == 171
Code example #7
def test_training_step_dict(tmpdir):
    """
    Tests that training_step can be used on its own
    """
    model = DeterministicModel()
    model.training_step = model.training_step__dict_return
    model.val_dataloader = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        weights_summary=None,
    )
    trainer.fit(model)

    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert not model.training_epoch_end_called

    # make sure training outputs what is expected
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break

    out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)

    assert out.signal == 0
    assert trainer.logger_connector.logged_metrics['log_acc1'] == 12.0
    assert trainer.logger_connector.logged_metrics['log_acc2'] == 7.0

    train_step_out = out.training_step_output_for_epoch_end
    assert len(train_step_out) == 1

    train_step_out = train_step_out[0][0]
    pbar_metrics = train_step_out['progress_bar']
    assert 'log' in train_step_out
    assert 'progress_bar' in train_step_out
    assert train_step_out['train_step_test'] == 549
    assert pbar_metrics['pbar_acc1'] == 17.0
    assert pbar_metrics['pbar_acc2'] == 19.0

    # make sure the optimizer closure returns the correct things
    opt_closure_result = trainer.train_loop.training_step_and_backward(
        batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens
    )
    assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
Code example #8
def test_train_step_epoch_end(tmpdir):
    """
    Checks training_step + training_epoch_end (NO training_step_end)
    """
    model = DeterministicModel()
    model.training_step = model.training_step__dict_return
    model.training_step_end = None
    model.training_epoch_end = model.training_epoch_end__dict
    model.val_dataloader = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model)

    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert model.training_epoch_end_called

    # assert epoch end metrics were added
    assert trainer.logger_connector.callback_metrics['epoch_end_log_1'] == 178
    assert trainer.logger_connector.progress_bar_metrics['epoch_end_pbar_1'] == 234

    # make sure training outputs what is expected
    batch_idx, batch = 0, next(iter(model.train_dataloader()))

    out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert trainer.logger_connector.logged_metrics['log_acc1'] == 12.0
    assert trainer.logger_connector.logged_metrics['log_acc2'] == 7.0

    # outputs are for 1 optimizer and no tbptt
    train_step_end_out = out.training_step_output_for_epoch_end
    assert len(train_step_end_out) == 1
    train_step_end_out = train_step_end_out[0][0]

    pbar_metrics = train_step_end_out['progress_bar']
    assert pbar_metrics['pbar_acc1'] == 17.0
    assert pbar_metrics['pbar_acc2'] == 19.0
Code example #9
def test_result_obj_lr_scheduler_step(tmpdir):
    """
    Test that the LR scheduler was called at the correct time with the correct metrics
    """
    model = DeterministicModel()
    model.training_step = model.training_step__for_step_end_dict
    model.training_step_end = model.training_step_end__dict
    model.training_epoch_end = model.training_epoch_end__dict
    model.val_dataloader = None
    model.configure_optimizers = model.configure_optimizers__lr_on_plateau_step

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        weights_summary=None,
    )
    trainer.fit(model)

    assert len(trainer.dev_debugger.saved_lr_scheduler_updates) == 8
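For reference, configure_optimizers__lr_on_plateau_step is not shown in these excerpts. Below is a hypothetical sketch of what such a hook could look like: the 'interval': 'step' setting is implied by the test name, while the optimizer, learning rate, and monitored key are illustrative guesses, not the helper's actual code.

# Hypothetical sketch only; NOT the actual test-suite helper.
def configure_optimizers__lr_on_plateau_step(self):
    optimizer = torch.optim.Adam(self.parameters(), lr=0.01)  # illustrative
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    # Lightning scheduler-dict format: step the scheduler every training
    # step and feed it a monitored metric (required for ReduceLROnPlateau).
    return [optimizer], [{
        'scheduler': scheduler,
        'interval': 'step',       # implied by the test name
        'monitor': 'train_loss',  # hypothetical monitored key
    }]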
Code example #10
def test_val_step_step_end(tmpdir):
    """
    Test that val step + val step end work
    """

    model = DeterministicModel()
    model.training_step = model.training_step__dict_return
    model.validation_step = model.validation_step__dict_return
    model.validation_step_end = model.validation_step_end  # no-op: keeps the model's own validation_step_end enabled
    model.validation_epoch_end = None

    trainer = Trainer(
        default_root_dir=tmpdir,
        weights_summary=None,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
    )
    trainer.fit(model)

    # callback_metrics holds the results of the full loop
    # eval_results is the output of _evaluate
    callback_metrics, eval_results = trainer.run_evaluation()
    assert len(callback_metrics) == 1
    assert len(callback_metrics[0]) == 6

    callback_metrics = callback_metrics[0]
    assert callback_metrics['val_step_end'] == 1802
    assert len(eval_results) == 2
    assert eval_results[0]['log']['log_acc1'] == 12
    assert eval_results[1]['log']['log_acc1'] == 13

    for k in ['val_loss', 'log', 'progress_bar']:
        assert k in eval_results[0]
        assert k in eval_results[1]

    # ensure all the keys ended up as candidates for callbacks
    assert len(trainer.logger_connector.callback_metrics) in [8, 9]

    # make sure correct steps were called
    assert model.validation_step_called
    assert model.validation_step_end_called
    assert not model.validation_epoch_end_called
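Finally, the exact values asserted throughout (171, 549, the pbar_* and log_* entries) are produced by DeterministicModel's deterministic hooks. The sketch below reconstructs the dict-return training step purely from the assertions above; it is illustrative, not the helper's real source, and self.step is an assumed internal helper.

# Hypothetical reconstruction, inferred from the assertions above;
# NOT the real DeterministicModel implementation.
def training_step__dict_return(self, batch, batch_idx):
    loss = self.step(batch, batch_idx)  # assumed helper; a tensor with .item() == 171
    return {
        'loss': loss,
        'train_step_test': 549,  # checked in Code example #7
        'progress_bar': {'pbar_acc1': 17.0, 'pbar_acc2': 19.0},  # Code examples #7, #8
        'log': {'log_acc1': 12.0, 'log_acc2': 7.0},              # logged_metrics assertions
    }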