Example #1
def test_arange_simple():
    t = torch.randn(3, 4, 5)
    f = Tensor(t)["i", "j"]
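    # Substituting a variable with arange over its full domain should be a no-op.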
    assert_close(f, f(i=arange("i", 3)))
    assert_close(f, f(j=arange("j", 4)))
    assert_close(f, f(i=arange("i", 3), j=arange("j", 4)))
    assert_close(f, f(i=arange("i", 3), j="j"))
    assert_close(f, f(i="i", j=arange("j", 4)))
Example #2
def test_function_of_torch_tensor():
    x = torch.randn(4, 3)
    y = torch.randn(3, 2)
    f = funsor.torch.function(reals(4, 3), reals(3, 2), reals(4, 2))(torch.matmul)
    actual = f(x, y)
    expected = f(Tensor(x), Tensor(y))
    assert_close(actual, expected)
Example #3
def test_subs_lambda():
    z = Variable('z', reals())
    i = Variable('i', bint(5))
    ix = random_tensor(OrderedDict([('i', bint(5))]), reals())
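    # ix has a free 'i', so substitution under the binder must alpha-rename the bound 'i'.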
    actual = Lambda(i, z)(z=ix)
    expected = Lambda(i(i='j'), z(z=ix))
    check_funsor(actual, expected.inputs, expected.output)
    assert_close(actual, expected)
Example #4
def test_dist_to_funsor_categorical(batch_shape, cardinality):
    logits = torch.randn(batch_shape + (cardinality, ))
    logits -= logits.logsumexp(dim=-1, keepdim=True)
    d = dist.Categorical(logits=logits)
    f = dist_to_funsor(d)
    assert isinstance(f, Tensor)
    expected = tensor_to_funsor(logits, ("value", ))
    assert_close(f, expected)
Example #5
def _check_mvn_affine(d1, data):
    backend_module = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
    assert isinstance(d1, backend_module.MultivariateNormal)
    d2 = reinterpret(d1)
    assert issubclass(type(d2), GaussianMixture)
    actual = d2(**data)
    expected = d1(**data)
    assert_close(actual, expected)
Example #6
def test_subs_reduce():
    x = random_tensor(OrderedDict([('i', bint(3)), ('j', bint(2))]), reals())
    ix = random_tensor(OrderedDict([('i', bint(3))]), bint(2))
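    # Rename ix's free 'i' to 'i2' so it is not summed out by the reduction over 'i' below.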
    ix2 = ix(i='i2')
    with interpretation(reflect):
        actual = x.reduce(ops.add, frozenset({"i"}))
    actual = actual(j=ix)
    expected = x(j=ix2).reduce(ops.add, frozenset({"i"}))(i2='i')
    assert_close(actual, expected)
Example #7
def test_reduce_add(inputs):
    int_inputs = OrderedDict((k, d) for k, d in inputs.items() if d.dtype != 'real')
    x = random_gaussian(inputs) + random_tensor(int_inputs)
    assert isinstance(x, Joint)
    actual = x.reduce(ops.add, 'i')

    xs = [x(i=i) for i in range(x.inputs['i'].dtype)]
    expected = reduce(ops.add, xs)
    assert_close(actual, expected, atol=1e-3, rtol=1e-4)
Example #8
def test_einsum(equation):
    sizes = dict(a=2, b=3, c=4)
    inputs, outputs = equation.split('->')
    inputs = inputs.split(',')
    tensors = [randn(tuple(sizes[d] for d in dims)) for dims in inputs]
    funsors = [Tensor(x) for x in tensors]
    expected = Tensor(ops.einsum(equation, *tensors))
    actual = Einsum(equation, tuple(funsors))
    assert_close(actual, expected, atol=1e-5, rtol=None)
Example #9
def test_align(int_inputs, real_inputs):
    inputs1 = OrderedDict(
        list(sorted(int_inputs.items())) + list(sorted(real_inputs.items())))
    inputs2 = OrderedDict(reversed(list(inputs1.items())))
    g1 = random_gaussian(inputs1)
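    # align() only reorders inputs, so a round-trip through the permuted order recovers g1.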
    g2 = g1.align(tuple(inputs2))
    assert g2.inputs == inputs2
    g3 = g2.align(tuple(inputs1))
    assert_close(g3, g1)
Example #10
def test_cholesky_inverse(batch_shape, size, requires_grad):
    x = torch.randn(batch_shape + (size, size))
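    # Make x symmetric positive definite so its Cholesky factor exists.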
    x = x.transpose(-1, -2).matmul(x)
    u = x.cholesky()
    if requires_grad:
        u.requires_grad_()
    assert_close(cholesky_inverse(u), naive_cholesky_inverse(u))
    if requires_grad:
        cholesky_inverse(u).sum().backward()
Example #11
def _assert_conjugate_density_ok(latent, conditional, obs, lazy_latent=None,
                                 num_samples=10000, prec=1e-2):
    sample_inputs = OrderedDict(n=Bint[num_samples])
    lazy_latent = lazy_latent if lazy_latent is not None else latent
    rng_key = None if get_backend() == "torch" else np.array([0, 0], dtype=np.uint32)
    latent_samples = lazy_latent.sample(frozenset(["prior"]), sample_inputs, rng_key=rng_key)
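    # Monte Carlo estimate of the marginal density: integrate the likelihood against latent samples.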
    expected = Integrate(latent_samples, conditional(value=obs).exp(), frozenset(['prior']))
    expected = expected.reduce(ops.add, frozenset(sample_inputs))
    actual = (latent + conditional).reduce(ops.logaddexp, set(["prior"]))(value=obs).exp()
    assert_close(actual, expected, atol=prec, rtol=None)
Example #12
def test_quote(output_shape, inputs):
    if get_backend() == "torch":
        import torch  # noqa: F401

    sizes = {'a': 4, 'b': 5, 'c': 6}
    inputs = OrderedDict((k, bint(sizes[k])) for k in inputs)
    x = random_tensor(inputs, reals(*output_shape))
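    # funsor.quote renders x as Python source that eval() can reconstruct.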
    s = funsor.quote(x)
    assert isinstance(s, str)
    assert_close(eval(s), x)
Example #13
def test_dist_to_funsor_bernoulli(batch_shape):
    logits = torch.randn(batch_shape)
    d = dist.Bernoulli(logits=logits)
    f = dist_to_funsor(d)
    assert isinstance(f, Funsor)

    value = d.sample()
    actual_log_prob = f(value=tensor_to_funsor(value))
    expected_log_prob = tensor_to_funsor(d.log_prob(value))
    assert_close(actual_log_prob, expected_log_prob)
Example #14
def test_reduce_logsumexp(int_inputs, real_inputs):
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    g = random_gaussian(inputs)
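    # Reducing 'x' and 'y' jointly must agree with reducing them one at a time, in either order.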
    g_xy = g.reduce(ops.logaddexp, frozenset(['x', 'y']))
    assert_close(g_xy, g.reduce(ops.logaddexp, 'x').reduce(ops.logaddexp, 'y'), atol=1e-3, rtol=None)
    assert_close(g_xy, g.reduce(ops.logaddexp, 'y').reduce(ops.logaddexp, 'x'), atol=1e-3, rtol=None)
Example #15
def test_cholesky_solve(batch_shape, size):
    b = torch.randn(batch_shape + (size, 5))
    x = torch.randn(batch_shape + (size, size))
    x = x.transpose(-1, -2).matmul(x)
    u = x.cholesky()
    expected = cholesky_solve(b, u)
    assert not expected.requires_grad
    actual = cholesky_solve(b.requires_grad_(), u.requires_grad_())
    assert actual.requires_grad
    assert_close(expected, actual)
Example #16
def test_diagonal_rename():
    x = Tensor(
        randn(2, 2, 3),
        OrderedDict(a=funsor.Bint[2], b=funsor.Bint[2], c=funsor.Bint[3]),
        'real')
    d = Variable("d", funsor.Bint[2])
    dt = x.materialize(d)
    yt = x(a=dt, b=dt)
    y = x(a=d, b=d)
    assert_close(y, yt)
Example #17
def test_detach():
    import torch
    try:
        from pyro.distributions.util import detach
    except ImportError:
        pytest.skip("detach() is not available")
    x = Tensor(torch.randn(2, 3, requires_grad=True))
    y = detach(x)
    assert_close(x, y)
    assert x.data.requires_grad
    assert not y.data.requires_grad
Example #18
def test_reduce_logaddexp_gaussian_lazy():
    a = random_gaussian(OrderedDict(i=bint(3), a=reals(2)))
    b = random_tensor(OrderedDict(i=bint(3), b=bint(2)))
    x = a + b
    assert isinstance(x, Contraction)
    assert set(x.inputs) == {'a', 'b', 'i'}

    y = x.reduce(ops.logaddexp, 'i')
    # assert isinstance(y, Reduce)
    assert set(y.inputs) == {'a', 'b'}
    assert_close(x.reduce(ops.logaddexp), y.reduce(ops.logaddexp))
Example #19
def test_dist_to_funsor_normal(batch_shape):
    loc = torch.randn(batch_shape)
    scale = torch.randn(batch_shape).exp()
    d = dist.Normal(loc, scale)
    f = dist_to_funsor(d)
    assert isinstance(f, Funsor)

    value = d.sample()
    actual_log_prob = f(value=tensor_to_funsor(value))
    expected_log_prob = tensor_to_funsor(d.log_prob(value))
    assert_close(actual_log_prob, expected_log_prob, rtol=1e-5)
Example #20
def test_reduce_logaddexp_deltas_lazy():
    a = Delta('a', Tensor(torch.randn(3, 2), OrderedDict(i=bint(3))))
    b = Delta('b', Tensor(torch.randn(3), OrderedDict(i=bint(3))))
    x = a + b
    assert isinstance(x, Delta)
    assert set(x.inputs) == {'a', 'b', 'i'}

    y = x.reduce(ops.logaddexp, 'i')
    # assert isinstance(y, Reduce)
    assert set(y.inputs) == {'a', 'b'}
    assert_close(x.reduce(ops.logaddexp), y.reduce(ops.logaddexp))
Example #21
def check_expand(old_dist, old_data):
    new_batch_shape = (2, ) + old_dist.batch_shape
    new_dist = old_dist.expand(new_batch_shape)
    assert new_dist.batch_shape == new_batch_shape

    # Evaluating old_data under the expanded distribution broadcasts to the new batch shape.
    old_log_prob = new_dist.log_prob(old_data)
    assert old_log_prob.shape == new_batch_shape

    new_data = old_data.expand(new_batch_shape + new_dist.event_shape)
    new_log_prob = new_dist.log_prob(new_data)
    assert_close(old_log_prob, new_log_prob)
    assert new_dist.log_prob(new_data).shape == new_batch_shape
Example #22
def test_dist_to_funsor_independent(batch_shape, event_shape):
    loc = torch.randn(batch_shape + event_shape)
    scale = torch.randn(batch_shape + event_shape).exp()
    d = dist.Normal(loc, scale).to_event(len(event_shape))
    f = dist_to_funsor(d)
    assert isinstance(f, Funsor)

    value = d.sample()
    funsor_value = tensor_to_funsor(value, event_output=len(event_shape))
    actual_log_prob = f(value=funsor_value)
    expected_log_prob = tensor_to_funsor(d.log_prob(value))
    assert_close(actual_log_prob, expected_log_prob, rtol=1e-5)
Example #23
def test_reduce_logaddexp_deltas_discrete_lazy():
    a = Delta('a', Tensor(randn(3, 2), OrderedDict(i=bint(3))))
    b = Delta('b', Tensor(randn(3), OrderedDict(i=bint(3))))
    c = Tensor(randn(3), OrderedDict(i=bint(3)))
    x = a + b + c
    assert isinstance(x, Contraction)
    assert set(x.inputs) == {'a', 'b', 'i'}

    y = x.reduce(ops.logaddexp, 'i')
    # assert isinstance(y, Reduce)
    assert set(y.inputs) == {'a', 'b'}
    assert_close(x.reduce(ops.logaddexp), y.reduce(ops.logaddexp))
Example #24
def test_block_vector():
    shape = (10, )
    expected = zeros(shape)
    actual = BlockVector(shape)
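    # BlockVector accumulates assigned slices and materializes a dense tensor via as_tensor().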

    expected[1] = randn(())
    actual[1] = expected[1]

    expected[3:5] = randn((2, ))
    actual[3:5] = expected[3:5]

    assert_close(actual.as_tensor(), expected)
Example #25
def test_reshape(batch_shape, old_shape, new_shape):
    inputs = OrderedDict(zip("abc", map(bint, batch_shape)))
    old = random_tensor(inputs, reals(*old_shape))
    assert old.reshape(old.shape) is old

    new = old.reshape(new_shape)
    assert new.inputs == inputs
    assert new.shape == new_shape
    assert new.dtype == old.dtype

    old2 = new.reshape(old_shape)
    assert_close(old2, old)
Example #26
def test_block_vector_batched(batch_shape):
    shape = batch_shape + (10, )
    expected = zeros(shape)
    actual = BlockVector(shape)

    expected[..., 1] = randn(batch_shape)
    actual[..., 1] = expected[..., 1]

    expected[..., 3:5] = randn(batch_shape + (2, ))
    actual[..., 3:5] = expected[..., 3:5]

    assert_close(actual.as_tensor(), expected)
Example #27
def test_categorical_log_prob(sample_shape, batch_shape, cardinality):
    logits = torch.randn(batch_shape + (cardinality, ))
    logits -= logits.logsumexp(dim=-1, keepdim=True)
    actual = Categorical(logits=logits)
    expected = dist.Categorical(logits=logits)
    assert actual.batch_shape == expected.batch_shape
    assert actual.event_shape == expected.event_shape

    value = expected.sample(sample_shape)
    actual_log_prob = actual.log_prob(value)
    expected_log_prob = expected.log_prob(value)
    assert_close(actual_log_prob, expected_log_prob)
Example #28
def test_deepcopy():
    data = randn(3, 2)
    x = Tensor(data)

    y = copy.deepcopy(x)
    assert_close(x, y)
    assert y is not x
    assert y.data is not x.data

    # Funsor terms are interned, so seeding memo with the original data makes deepcopy return x itself.
    memo = {id(data): data}
    z = copy.deepcopy(x, memo)
    assert z is x
Example #29
def test_dist_to_funsor_masked(batch_shape):
    loc = torch.randn(batch_shape)
    scale = torch.randn(batch_shape).exp()
    mask = torch.bernoulli(torch.full(batch_shape, 0.5)).byte()
    d = dist.Normal(loc, scale).mask(mask)
    assert isinstance(d, MaskedDistribution)
    f = dist_to_funsor(d)
    assert isinstance(f, Funsor)

    value = d.sample()
    actual_log_prob = f(value=tensor_to_funsor(value))
    expected_log_prob = tensor_to_funsor(d.log_prob(value))
    assert_close(actual_log_prob, expected_log_prob)
Example #30
def test_dist_to_funsor_mvn(batch_shape, event_size):
    loc = torch.randn(batch_shape + (event_size, ))
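    # Build a random positive-definite covariance and pass its Cholesky factor to the distribution.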
    cov = torch.randn(batch_shape + (event_size, 2 * event_size))
    cov = cov.matmul(cov.transpose(-1, -2))
    scale_tril = torch.cholesky(cov)
    d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
    f = dist_to_funsor(d)
    assert isinstance(f, Funsor)

    value = d.sample()
    actual_log_prob = f(value=tensor_to_funsor(value, event_output=1))
    expected_log_prob = tensor_to_funsor(d.log_prob(value))
    assert_close(actual_log_prob, expected_log_prob)