Example #1
def test_wrong_input_shapes():
    m = FractionalBias()

    with pytest.raises(
            ValueError,
            match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))

    with pytest.raises(
            ValueError,
            match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))

    with pytest.raises(
            ValueError,
            match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1, 2), torch.rand(4)))

    with pytest.raises(
            ValueError,
            match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4), torch.rand(4, 1, 2)))
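
All of the snippets on this page come from the same pytest module, so no excerpt repeats its imports. A preamble along the following lines (a sketch, assuming the pytorch-ignite contrib test layout these examples are written against) makes them runnable on their own:

# Assumed preamble for the excerpts on this page; not part of the original snippets.
import numpy as np
import pytest
import torch

import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError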
Example #2
def test_zero_sample():
    m = FractionalBias()
    with pytest.raises(
        NotComputableError,
        match=r"FractionalBias must have at least one example before it can be computed",
    ):
        m.compute()
Example #3
def test_wrong_input_shapes():
    m = FractionalBias()

    with pytest.raises(ValueError):
        m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))

    with pytest.raises(ValueError):
        m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))

    with pytest.raises(ValueError):
        m.update((torch.rand(4, 1, 2), torch.rand(4,)))

    with pytest.raises(ValueError):
        m.update((torch.rand(4,), torch.rand(4, 1, 2)))
Example #4
    def _test(n_epochs, metric_device):
        metric_device = torch.device(metric_device)
        n_iters = 80
        s = 16  # batch size per iteration
        n_classes = 2  # not used below

        offset = n_iters * s
        # y_true / y_preds cover all ranks; each rank reads its own
        # contiguous slice of size `offset` in `update` below.
        y_true = torch.rand(size=(offset * idist.get_world_size(), ),
                            dtype=torch.double).to(device)
        y_preds = torch.rand(size=(offset * idist.get_world_size(), ),
                             dtype=torch.double).to(device)

        def update(engine, i):
            return (
                y_preds[i * s + rank * offset:(i + 1) * s + rank * offset],
                y_true[i * s + rank * offset:(i + 1) * s + rank * offset],
            )

        engine = Engine(update)

        m = FractionalBias(device=metric_device)
        m.attach(engine, "fb")

        data = list(range(n_iters))
        engine.run(data=data, max_epochs=n_epochs)

        assert "fb" in engine.state.metrics

        res = engine.state.metrics["fb"]
        if isinstance(res, torch.Tensor):
            res = res.cpu().numpy()

        np_y_true = y_true.cpu().numpy()
        np_y_preds = y_preds.cpu().numpy()

        np_sum = (2 * (np_y_true - np_y_preds) /
                  (np_y_preds + np_y_true + 1e-30)).sum()
        np_len = len(y_preds)
        np_ans = np_sum / np_len

        assert pytest.approx(res, rel=tol) == np_ans
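
This example (and the later distributed check in Example #6) also relies on `device`, `rank`, and `tol`, which come from the enclosing distributed test rather than from the excerpt itself. Illustrative single-process stand-ins (the values shown are assumptions, not taken from the original file) would be:

# Hypothetical stand-ins for the closure variables used in Examples #4 and #6;
# the real tests receive these from their surrounding distributed setup.
device = idist.device()  # torch.device("cpu") or the current CUDA device
rank = idist.get_rank()  # 0 in a single-process run
tol = 1e-5               # tolerance used when comparing against the numpy reference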
Example #5
    def _test(y_pred, y, batch_size):
        def update_fn(engine, batch):
            idx = (engine.state.iteration - 1) * batch_size
            y_true_batch = np_y[idx : idx + batch_size]
            y_pred_batch = np_y_pred[idx : idx + batch_size]
            return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)

        engine = Engine(update_fn)

        m = FractionalBias()
        m.attach(engine, "fb")

        np_y = y.double().numpy().ravel()
        np_y_pred = y_pred.double().numpy().ravel()

        data = list(range(y_pred.shape[0] // batch_size))
        fb = engine.run(data, max_epochs=1).metrics["fb"]

        np_sum = (2 * (np_y - np_y_pred) / (np_y_pred + np_y)).sum()
        np_len = len(y_pred)
        np_ans = np_sum / np_len

        assert np_ans == pytest.approx(fb)
Example #6
    def _test(metric_device):
        metric_device = torch.device(metric_device)
        m = FractionalBias(device=metric_device)
        torch.manual_seed(10 + rank)

        y_pred = torch.randint(0, 10, size=(10,), device=device).float()
        y = torch.randint(0, 10, size=(10,), device=device).float()

        m.update((y_pred, y))

        # gather y_pred, y
        y_pred = idist.all_gather(y_pred)
        y = idist.all_gather(y)

        np_y_pred = y_pred.cpu().numpy()
        np_y = y.cpu().numpy()

        res = m.compute()

        np_sum = (2 * (np_y - np_y_pred) / (np_y_pred + np_y + 1e-30)).sum()
        np_len = len(y_pred)
        np_ans = np_sum / np_len

        assert np_ans == pytest.approx(res, rel=tol)
Example #7
def test_fractional_bias():
    a = np.random.randn(4)
    b = np.random.randn(4)
    c = np.random.randn(4)
    d = np.random.randn(4)
    ground_truth = np.random.randn(4)

    m = FractionalBias()

    m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
    np_sum = (2 * (ground_truth - a) / (a + ground_truth)).sum()
    np_len = len(a)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)

    m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
    np_sum += (2 * (ground_truth - b) / (b + ground_truth)).sum()
    np_len += len(b)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)

    m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
    np_sum += (2 * (ground_truth - c) / (c + ground_truth)).sum()
    np_len += len(c)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)

    m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
    np_sum += (2 * (ground_truth - d) / (d + ground_truth)).sum()
    np_len += len(d)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)
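
Example #7 spells out the quantity being accumulated: the fractional bias is the mean over all seen samples of 2 * (y - y_pred) / (y_pred + y), with the running sum and sample count carried across update calls. A standalone sketch of that reference check (the helper name is illustrative; it assumes the preamble shown after Example #1):

def _reference_fractional_bias(np_y_pred, np_y):
    # Closed-form numpy reference: mean of 2 * (y - y_pred) / (y_pred + y).
    return float((2 * (np_y - np_y_pred) / (np_y_pred + np_y)).sum() / len(np_y_pred))

np_y_pred = np.random.rand(8) + 0.1  # keep values positive so the denominator stays away from zero
np_y = np.random.rand(8) + 0.1

m = FractionalBias()
m.update((torch.from_numpy(np_y_pred), torch.from_numpy(np_y)))
assert m.compute() == pytest.approx(_reference_fractional_bias(np_y_pred, np_y))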
Example #8
def test_error_is_not_nan():
    m = FractionalBias()
    m.update((torch.zeros(4), torch.zeros(4)))
    assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
Example #9
def test_zero_div():
    m = FractionalBias()
    with pytest.raises(NotComputableError):
        m.compute()