# Example 1
def test_extended_binomial(tol):
    """ExtendedBinomial matches Binomial on in-support data and yields
    -inf (instead of raising) for out-of-support integer values."""
    with set_approx_log_prob_tol(tol):
        total_count = torch.tensor([0.0, 1.0, 2.0, 10.0])
        probs = torch.tensor([0.5, 0.5, 0.4, 0.2]).requires_grad_()

        base = dist.Binomial(total_count, probs)
        extended = dist.ExtendedBinomial(total_count, probs)

        # In-support samples must score identically under both distributions.
        samples = base.sample((100, ))
        assert_equal(base.log_prob(samples), extended.log_prob(samples))

        # Out-of-support integers: the base distribution raises, while the
        # extended one returns finite log-probs exactly on the base support.
        grid = torch.arange(-10.0, 20.0).unsqueeze(-1)
        with pytest.raises(ValueError):
            base.log_prob(grid)
        log_prob = extended.log_prob(grid)
        in_support = base.support.check(grid)
        assert ((log_prob > -math.inf) == in_support).all()
        check_grad(log_prob, probs)

        # Mis-shaped data must still raise.
        with pytest.raises(ValueError):
            extended.log_prob(torch.tensor([0.0, 0.0]))

        # Non-integer values must still raise.
        with pytest.raises(ValueError):
            extended.log_prob(torch.tensor(0.5))

        # With negative total_count every value lies outside the support,
        # so all log-probs are -inf, yet gradients remain well defined.
        total_count = torch.arange(-10, 0.0)
        probs = torch.tensor(0.5).requires_grad_()
        d = dist.ExtendedBinomial(total_count, probs)
        log_prob = d.log_prob(grid)
        assert (log_prob == -math.inf).all()
        check_grad(log_prob, probs)
# Example 2
def test_binomial_approx_log_prob(tol):
    """The approximate Binomial.log_prob stays within ``tol`` of the exact
    torch.distributions result over a grid of (total_count, k, logits)."""
    logits = torch.linspace(-10.0, 10.0, 100)
    successes = torch.arange(100.0).unsqueeze(-1)
    failures = torch.arange(100.0).unsqueeze(-1).unsqueeze(-1)
    # Broadcasting (failures, successes, logits) covers all combinations.
    total = successes + failures

    exact = torch.distributions.Binomial(total, logits=logits).log_prob(successes)
    with set_approx_log_prob_tol(tol):
        approx = dist.Binomial(total, logits=logits).log_prob(successes)

    assert_close(approx, exact, atol=tol)
# Example 3
def test_extended_beta_binomial(tol):
    """ExtendedBetaBinomial matches BetaBinomial on in-support data and
    yields -inf (instead of raising) for out-of-support integer values."""
    with set_approx_log_prob_tol(tol):
        concentration1 = torch.tensor([0.2, 1.0, 2.0, 1.0]).requires_grad_()
        concentration0 = torch.tensor([0.2, 0.5, 1.0, 2.0]).requires_grad_()
        total_count = torch.tensor([0., 1., 2., 10.])

        base = dist.BetaBinomial(concentration1, concentration0, total_count)
        extended = dist.ExtendedBetaBinomial(concentration1, concentration0, total_count)

        # In-support samples must score identically under both distributions.
        samples = base.sample((100,))
        assert_equal(base.log_prob(samples), extended.log_prob(samples))

        # Out-of-support integers: the base distribution raises, while the
        # extended one returns finite log-probs exactly on the base support.
        grid = torch.arange(-10., 20.).unsqueeze(-1)
        with pytest.raises(ValueError):
            base.log_prob(grid)
        log_prob = extended.log_prob(grid)
        in_support = base.support.check(grid)
        assert ((log_prob > -math.inf) == in_support).all()
        check_grad(log_prob, concentration1, concentration0)

        # Mis-shaped data must still raise.
        with pytest.raises(ValueError):
            extended.log_prob(torch.tensor([0., 0.]))

        # Non-integer values must still raise.
        with pytest.raises(ValueError):
            extended.log_prob(torch.tensor(0.5))

        # With negative total_count every value lies outside the support,
        # so all log-probs are -inf, yet gradients remain well defined.
        concentration1 = torch.tensor(1.5).requires_grad_()
        concentration0 = torch.tensor(1.5).requires_grad_()
        total_count = torch.arange(-10, 0.)
        d = dist.ExtendedBetaBinomial(concentration1, concentration0, total_count)
        log_prob = d.log_prob(grid)
        assert (log_prob == -math.inf).all()
        check_grad(log_prob, concentration1, concentration0)