Example 1
def _test_distrib_integration(device, tol=1e-6):
    import numpy as np
    import pytest
    import torch

    import ignite.distributed as idist
    from ignite.engine import Engine
    from ignite.metrics import MeanSquaredError

    rank = idist.get_rank()
    n_iters = 100
    s = 10
    offset = n_iters * s

    y_true = torch.arange(0,
                          offset * idist.get_world_size(),
                          dtype=torch.float).to(device)
    y_preds = torch.ones(offset * idist.get_world_size(),
                         dtype=torch.float).to(device)

    def update(engine, i):
        # Each rank serves its own contiguous shard of s samples per iteration.
        return (
            y_preds[i * s + offset * rank:(i + 1) * s + offset * rank],
            y_true[i * s + offset * rank:(i + 1) * s + offset * rank],
        )

    engine = Engine(update)

    m = MeanSquaredError()
    m.attach(engine, "mse")

    data = list(range(n_iters))
    engine.run(data=data, max_epochs=1)

    assert "mse" in engine.state.metrics
    res = engine.state.metrics["mse"]

    true_res = np.mean(np.power((y_true - y_preds).cpu().numpy(), 2.0))

    assert pytest.approx(res, rel=tol) == true_res
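
For reference, attach() is a convenience around the metric's own reset/update/compute cycle. A minimal sketch of driving MeanSquaredError by hand (the values here are illustrative, not from the test above):

import torch
from ignite.metrics import MeanSquaredError

m = MeanSquaredError()
m.reset()
# update() consumes the same (y_pred, y) tuple that the engine's
# update function returns above; call it once per batch.
m.update((torch.ones(4), torch.arange(4, dtype=torch.float)))
print(m.compute())  # ((1-0)**2 + 0 + (1-2)**2 + (1-3)**2) / 4 == 1.5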
Example 2
import pytest
import torch

import pytorch_pfn_extras as ppe


def test_ignite_evaluator_reporting_metrics():
    try:
        from ignite.metrics import MeanSquaredError
    except ImportError:
        pytest.skip('pytorch-ignite is not installed')

    # This test verifies that both user-manually-reported metrics and
    # ignite-calculated ones are correctly reflected in the reporter
    # observation.
    # IgniteDummyModel and create_dummy_evaluator are test helpers
    # defined elsewhere in the original test module.
    model = IgniteDummyModel()
    n_data = 10
    x = torch.randn((n_data, 2), requires_grad=True)
    y = torch.randn((n_data, 2))
    dataset = torch.utils.data.TensorDataset(x, y)
    loader = torch.utils.data.DataLoader(dataset, batch_size=3)
    evaluator = create_dummy_evaluator(model)
    # Attach metrics to the evaluator
    metric = MeanSquaredError()
    metric.attach(evaluator, 'mse')
    evaluator_ignite_ext = ppe.training.extensions.IgniteEvaluator(
        evaluator, loader, model, progress_bar=False)
    reporter = ppe.reporting.Reporter()
    with reporter:
        result = evaluator_ignite_ext()
    # Internally reported metrics
    assert result['main/x'] == 1.5
    # Ignite calculated metric
    assert result['val/mse'] == 0.0
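
For context, the 'main/' prefix on the manually reported value comes from ppe's Chainer-style reporting scope. A minimal sketch, assuming only the public pytorch_pfn_extras reporting API (the observer name 'main' is illustrative):

import torch
import pytorch_pfn_extras as ppe

model = torch.nn.Linear(2, 2)
reporter = ppe.reporting.Reporter()
# Values reported against an observer registered as 'main' are
# prefixed accordingly in the observation dict.
reporter.add_observer('main', model)
with reporter:
    ppe.reporting.report({'x': 1.5}, model)
print(reporter.observation)  # {'main/x': 1.5}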
Example 3
    def _test(metric_device):
        # Relies on update, n_iters, y_true, y_preds and tol from the
        # enclosing scope (see the sketch after this example).
        engine = Engine(update)

        m = MeanSquaredError(device=metric_device)
        m.attach(engine, "mse")

        data = list(range(n_iters))
        engine.run(data=data, max_epochs=1)

        assert "mse" in engine.state.metrics
        res = engine.state.metrics["mse"]

        true_res = np.mean(np.power((y_true - y_preds).cpu().numpy(), 2.0))

        assert pytest.approx(res, rel=tol) == true_res
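
For context, this fragment is an inner helper and assumes names from its enclosing scope. A minimal single-process sketch of that scope, reusing the fixtures from Example 1 (the values are assumptions, not the original wrapper):

import numpy as np
import pytest
import torch
from ignite.engine import Engine
from ignite.metrics import MeanSquaredError

tol = 1e-6
n_iters, s = 100, 10
y_true = torch.arange(n_iters * s, dtype=torch.float)
y_preds = torch.ones(n_iters * s, dtype=torch.float)

def update(engine, i):
    # Serve one slice of s samples per iteration.
    return y_preds[i * s:(i + 1) * s], y_true[i * s:(i + 1) * s]

# With these definitions in place the helper runs on CPU:
# _test("cpu")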