Example #1
def test_zip_1_gate(rate):
    # if gate is 1, ZIP reduces to Delta(0)
    zip_ = ZeroInflatedPoisson(torch.ones(1), torch.tensor(rate))
    delta = Delta(torch.zeros(1))
    s = torch.tensor([0., 1.])
    zip_prob = zip_.log_prob(s)
    delta_prob = delta.log_prob(s)
    assert_tensors_equal(zip_prob, delta_prob)
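
Note: rate is injected by the test runner and assert_tensors_equal is a helper defined elsewhere in the test module. A minimal sketch of how the snippet could run stand-alone, assuming the distributions come from pyro.distributions, the helper simply wraps torch.allclose, and the parametrized rates are illustrative values rather than the originals:

import pytest
import torch
from pyro.distributions import Delta, ZeroInflatedPoisson  # assumed source of the distributions

# Illustrative values; the original parametrization is not shown on this page.
pytestmark = pytest.mark.parametrize("rate", [0.1, 1.0, 2.5])


def assert_tensors_equal(actual, expected, prec=1e-5):
    # Illustrative stand-in for the test module's helper:
    # element-wise comparison within an absolute tolerance.
    assert torch.allclose(actual, expected, atol=prec)
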
Example #2
def test_zip_0_gate(rate):
    # if gate is 0, ZIP reduces to Poisson
    zip_ = ZeroInflatedPoisson(torch.zeros(1), torch.tensor(rate))
    pois = Poisson(torch.tensor(rate))
    s = pois.sample((20, ))
    zip_prob = zip_.log_prob(s)
    pois_prob = pois.log_prob(s)
    assert_tensors_equal(zip_prob, pois_prob)
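
Why gate = 0 collapses to a plain Poisson: the zero-inflated distribution is a mixture of a point mass at zero (weight gate) and a Poisson (weight 1 - gate). A minimal reference log-probability written directly from that mixture definition (a sketch for exposition, not Pyro's implementation, and not numerically stable):

import torch
from torch.distributions import Poisson


def zip_log_prob_reference(gate, rate, value):
    # density: gate * 1[value == 0] + (1 - gate) * Poisson(rate).prob(value)
    pois_prob = Poisson(rate).log_prob(value).exp()
    mixed = gate * (value == 0).to(pois_prob.dtype) + (1 - gate) * pois_prob
    return mixed.log()  # gate = 0 recovers Poisson exactly; gate = 1 recovers Delta(0)
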
Example #3
def test_zip_mean_variance(gate, rate):
    num_samples = 1000000
    zip_ = ZeroInflatedPoisson(torch.tensor(gate), torch.tensor(rate))
    s = zip_.sample((num_samples, ))
    expected_mean = zip_.mean
    estimated_mean = s.mean()
    expected_std = zip_.stddev
    estimated_std = s.std()
    assert_tensors_equal(expected_mean, estimated_mean, prec=1e-02)
    assert_tensors_equal(expected_std, estimated_std, prec=1e-02)
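
The moments being estimated have simple closed forms: a zero-inflated Poisson has mean (1 - gate) * rate and variance rate * (1 - gate) * (1 + rate * gate). A small sketch computing the same reference moments from those textbook formulas instead of the distribution's attributes (assumes gate and rate are tensors):

import torch


def zip_mean_std(gate, rate):
    # E[X]   = (1 - gate) * rate
    # Var[X] = rate * (1 - gate) * (1 + rate * gate)
    mean = (1 - gate) * rate
    var = rate * (1 - gate) * (1 + rate * gate)
    return mean, var.sqrt()
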
Example #4
def test_log_prob_d2(eta):
    dist = LKJCorrCholesky(2, torch.tensor([eta]))
    test_dist = TransformedDistribution(Beta(eta, eta),
                                        AffineTransform(loc=-1., scale=2.0))

    samples = dist.sample(torch.Size([100]))
    lp = dist.log_prob(samples)
    x = samples[..., 1, 0]
    tst = test_dist.log_prob(x)

    assert_tensors_equal(lp, tst, prec=1e-6)
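
The d = 2 case admits this Beta reference because a 2x2 correlation-Cholesky factor has a single free entry, L[1, 0], which equals the correlation coefficient itself, and the LKJ(eta) density over that coefficient is proportional to (1 - r^2)^(eta - 1), i.e. a Beta(eta, eta) variable rescaled from (0, 1) to (-1, 1). A quick sanity check of the first identity (plain PyTorch, independent of either library under test):

import torch

L = torch.tensor([[1.0, 0.0],
                  [0.6, 0.8]])                       # a valid 2x2 correlation-Cholesky factor
R = L @ L.T                                          # the correlation matrix it represents
assert torch.allclose(R[1, 0], L[1, 0])              # off-diagonal correlation equals L[1, 0]
assert torch.allclose(R.diagonal(), torch.ones(2))   # unit diagonal
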
Example #5
def test_log_prob_d2(concentration):
    dist = LKJCholesky(2, torch.tensor([concentration]))
    test_dist = TransformedDistribution(Beta(concentration, concentration),
                                        AffineTransform(loc=-1., scale=2.0))

    samples = dist.sample(torch.Size([100]))
    lp = dist.log_prob(samples)
    x = samples[..., 1, 0]
    tst = test_dist.log_prob(x)
    # LKJCholesky keeps log_prob finite, so copy the reference's inf values
    # into lp before comparing
    lp[tst == math.inf] = math.inf
    assert_tensors_equal(lp, tst, prec=1e-3)
Example #6
def test_corr_cholesky_transform(x_shape, mapping):
    transform = mapping(constraints.corr_cholesky)
    x = torch.randn(x_shape, requires_grad=True).clamp(-2, 2)
    y = transform(x)

    # test codomain
    assert (transform.codomain.check(y) == 1).all()

    # test inv
    z = transform.inv(y)
    assert_tensors_equal(x, z, prec=1e-4)

    # test domain
    assert (transform.domain.check(z) == 1).all()

    # test log_abs_det_jacobian
    log_det = transform.log_abs_det_jacobian(x, y)
    assert log_det.shape == x_shape[:-1]
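
The codomain check above asserts that each output is a valid correlation-Cholesky factor: lower triangular, strictly positive diagonal, and rows of unit Euclidean norm (so L @ L.T has ones on the diagonal). A small sketch verifying those properties by hand, assuming PyTorch's own CorrCholeskyTransform as the mapping (the test above takes a generic mapping fixture, which may come from a different library):

import torch
from torch.distributions import constraints, transforms

transform = transforms.CorrCholeskyTransform()  # unconstrained vector -> correlation-Cholesky factor
x = torch.randn(3)                              # 3 parameters -> a 3x3 factor
L = transform(x)

assert torch.allclose(L, L.tril())                       # lower triangular
assert (L.diagonal() > 0).all()                          # strictly positive diagonal
assert torch.allclose(L.pow(2).sum(-1), torch.ones(3))   # unit-norm rows
assert (constraints.corr_cholesky.check(L) == 1).all()   # same checks via the constraint
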
Example #7
def test_unconstrained_to_corr_cholesky_transform(y_shape):
    transform = transforms.CorrLCholeskyTransform()
    y = torch.empty(y_shape).uniform_(-4, 4).requires_grad_()
    x = transform(y)

    # test codomain
    assert (transform.codomain.check(x) == 1).all()

    # test inv
    y_prime = transform.inv(x)
    assert_tensors_equal(y, y_prime, prec=1e-4)

    # test domain
    assert (transform.domain.check(y_prime) == 1).all()

    # test log_abs_det_jacobian
    log_det = transform.log_abs_det_jacobian(y, x)
    assert log_det.shape == y_shape[:-1]
    if len(y_shape) == 1:
        triu_index = x.new_ones(x.shape).triu(diagonal=1).to(torch.bool)
        x_tril_vector = x.t()[triu_index]
        assert_tensors_equal(_autograd_log_det(x_tril_vector, y),
                             log_det,
                             prec=1e-4)

        x_tril_vector = x_tril_vector.detach().requires_grad_()
        x = x.new_zeros(x.shape)
        x[triu_index] = x_tril_vector
        x = x.t()
        z = transform.inv(x)
        assert_tensors_equal(_autograd_log_det(z, x_tril_vector),
                             -log_det,
                             prec=1e-4)
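
_autograd_log_det is another helper from the test module: it computes log|det J| of the mapping from its second argument (the inputs, which require grad) to its first (the outputs), which is what log_abs_det_jacobian should match. A sketch of how such a helper might be written with autograd (an assumption about its implementation, not the original code; assumes both arguments are flat vectors of equal length):

import torch


def _autograd_log_det(ys, x):
    # Build the Jacobian d(ys)/d(x) one output at a time, then take log|det|.
    rows = [torch.autograd.grad(y, x, retain_graph=True)[0] for y in ys.reshape(-1)]
    jacobian = torch.stack(rows)
    return torch.slogdet(jacobian)[1]
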