Code example #1
File: test_joint.py  Project: pangyyyyy/funsor
def test_smoke(expr, expected_type):
    dx = Delta('x', Tensor(randn(2, 3), OrderedDict([('i', bint(2))])))
    assert isinstance(dx, Delta)

    dy = Delta('y', Tensor(randn(3, 4), OrderedDict([('j', bint(3))])))
    assert isinstance(dy, Delta)

    t = Tensor(randn(2, 3), OrderedDict([('i', bint(2)), ('j', bint(3))]))
    assert isinstance(t, Tensor)

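    # The Gaussian is parametrized by an information vector and a precision
    # matrix; it is batched over the discrete input 'i' (size 2) and has a
    # real input 'x' of shape (3,).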
    g = Gaussian(info_vec=numeric_array([[0.0, 0.1, 0.2], [2.0, 3.0, 4.0]]),
                 precision=numeric_array([[[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                           [0.2, 0.3, 1.0]],
                                          [[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                           [0.2, 0.3, 1.0]]]),
                 inputs=OrderedDict([('i', bint(2)), ('x', reals(3))]))
    assert isinstance(g, Gaussian)

    i0 = Number(1, 2)
    assert isinstance(i0, Number)

    x0 = Tensor(numeric_array([0.5, 0.6, 0.7]))
    assert isinstance(x0, Tensor)

    result = eval(expr)
    assert isinstance(result, expected_type)
Code example #2
File: test_distribution.py  Project: pangyyyyy/funsor
def test_delta_density(batch_shape, event_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

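    # funsor.function wraps a backend callable; the decorator arguments list
    # the input domains followed by the output domain.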
    @funsor.function(reals(*event_shape), reals(), reals(*event_shape),
                     reals())
    def delta(v, log_density, value):
        eq = (v == value)
        for _ in range(len(event_shape)):
            eq = ops.all(eq, -1)
        return ops.log(ops.astype(eq, 'float32')) + log_density

    check_funsor(
        delta, {
            'v': reals(*event_shape),
            'log_density': reals(),
            'value': reals(*event_shape)
        }, reals())

    v = Tensor(randn(batch_shape + event_shape), inputs)
    log_density = Tensor(ops.exp(randn(batch_shape)), inputs)
    for value in [v, Tensor(randn(batch_shape + event_shape), inputs)]:
        expected = delta(v, log_density, value)
        check_funsor(expected, inputs, reals())

        actual = dist.Delta(v, log_density, value)
        check_funsor(actual, inputs, reals())
        assert_close(actual, expected)
Code example #3
File: test_distribution.py  Project: pangyyyyy/funsor
def test_beta_density(batch_shape, eager):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(), reals(), reals(), reals())
    def beta(concentration1, concentration0, value):
        return backend_dist.Beta(concentration1,
                                 concentration0).log_prob(value)

    check_funsor(beta, {
        'concentration1': reals(),
        'concentration0': reals(),
        'value': reals()
    }, reals())

    concentration1 = Tensor(ops.exp(randn(batch_shape)), inputs)
    concentration0 = Tensor(ops.exp(randn(batch_shape)), inputs)
    value = Tensor(rand(batch_shape), inputs)
    expected = beta(concentration1, concentration0, value)
    check_funsor(expected, inputs, reals())

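    # Eager: evaluate the density at `value` directly. Lazy: build the
    # distribution with a free 'value' variable, then substitute. Both paths
    # must agree with the reference `beta` function above.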
    d = Variable('value', reals())
    actual = dist.Beta(concentration1, concentration0, value) if eager else \
        dist.Beta(concentration1, concentration0, d)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)
Code example #4
def test_tensor_tensordot(x_shape, xy_shape, y_shape):
    x = randn(x_shape + xy_shape)
    y = randn(xy_shape + y_shape)
    dim = len(xy_shape)
    actual = tensordot(Tensor(x), Tensor(y), dim)
    expected = Tensor(_numeric_tensordot(x, y, dim))
    assert_close(actual, expected, atol=1e-5, rtol=None)
Code example #5
def test_mvn_affine_reshape():
    x = Variable('x', Reals[2, 2])
    y = Variable('y', Reals[4])
    data = dict(x=Tensor(randn(2, 2)), y=Tensor(randn(4)))
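    # The substituted value is an affine expression (reshape then subtraction)
    # in the free variables x and y.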
    with interpretation(lazy):
        d = to_funsor(random_mvn((), 4), Real)
        d = d(value=x.reshape((4,)) - y)
    _check_mvn_affine(d, data)
Code example #6
def test_mvn_affine_two_vars():
    x = Variable('x', Reals[2])
    y = Variable('y', Reals[2])
    data = dict(x=Tensor(randn(2)), y=Tensor(randn(2)))
    with interpretation(lazy):
        d = to_funsor(random_mvn((), 2), Real)
        d = d(value=x - y)
    _check_mvn_affine(d, data)
Code example #7
def test_mvn_affine_einsum():
    c = Tensor(randn(3, 2, 2))
    x = Variable('x', Reals[2, 2])
    y = Variable('y', Real)
    data = dict(x=Tensor(randn(2, 2)), y=Tensor(randn(())))
    with interpretation(lazy):
        d = to_funsor(random_mvn((), 3), Real)
        d = d(value=Einsum("abc,bc->a", c, x) + y)
    _check_mvn_affine(d, data)
Code example #8
def test_mvn_affine_matmul_sub():
    x = Variable('x', Reals[2])
    y = Variable('y', Reals[3])
    m = Tensor(randn(2, 3))
    data = dict(x=Tensor(randn(2)), y=Tensor(randn(3)))
    with interpretation(lazy):
        d = to_funsor(random_mvn((), 3), Real)
        d = d(value=x @ m - y)
    _check_mvn_affine(d, data)
Code example #9
def test_mvn_affine_matmul():
    x = Variable('x', Reals[2])
    y = Variable('y', Reals[3])
    m = Tensor(randn(2, 3))
    data = dict(x=Tensor(randn(2)), y=Tensor(randn(3)))
    with interpretation(lazy):
        d = random_mvn((), 3)
        d = dist.MultivariateNormal(loc=y, scale_tril=d.scale_tril, value=x @ m)
    _check_mvn_affine(d, data)
Code example #10
File: test_distribution.py  Project: pangyyyyy/funsor
def _random_scale_tril(shape):
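    # The torch backend builds the Cholesky factor from a full square matrix,
    # while the other backends biject from a flattened vector of
    # n * (n + 1) // 2 unconstrained entries.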
    if get_backend() == "torch":
        data = randn(shape)
        return backend_dist.transforms.transform_to(
            backend_dist.constraints.lower_cholesky)(data)
    else:
        data = randn(shape[:-2] + (shape[-1] * (shape[-1] + 1) // 2, ))
        return backend_dist.biject_to(
            backend_dist.constraints.lower_cholesky)(data)
Code example #11
def test_beta_sample(with_lazy, batch_shape, sample_inputs, reparametrized):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    concentration1 = ops.exp(randn(batch_shape))
    concentration0 = ops.exp(randn(batch_shape))
    funsor_dist_class = (dist.Beta if reparametrized else dist.NonreparameterizedBeta)
    params = (concentration1, concentration0)

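    # Presumably the looser tolerance for the non-reparametrized variant
    # reflects noisier sample-based statistics without pathwise gradients.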
    _check_sample(funsor_dist_class, params, sample_inputs, inputs, atol=1e-2 if reparametrized else 1e-1,
                  statistic="variance", num_samples=100000, with_lazy=with_lazy)
Code example #12
File: test_joint.py  Project: pangyyyyy/funsor
def test_reduce_logaddexp_deltas_lazy():
    a = Delta('a', Tensor(randn(3, 2), OrderedDict(i=bint(3))))
    b = Delta('b', Tensor(randn(3), OrderedDict(i=bint(3))))
    x = a + b
    assert isinstance(x, Delta)
    assert set(x.inputs) == {'a', 'b', 'i'}

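    # Reducing over 'i' with logaddexp marginalizes the discrete batch input;
    # the delta-bound names 'a' and 'b' remain free. The final check verifies
    # that full reduction agrees whether done in one step or two.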
    y = x.reduce(ops.logaddexp, 'i')
    # assert isinstance(y, Reduce)
    assert set(y.inputs) == {'a', 'b'}
    assert_close(x.reduce(ops.logaddexp), y.reduce(ops.logaddexp))
Code example #13
def test_reduce_logaddexp_deltas_discrete_lazy():
    a = Delta('a', Tensor(randn(3, 2), OrderedDict(i=Bint[3])))
    b = Delta('b', Tensor(randn(3), OrderedDict(i=Bint[3])))
    c = Tensor(randn(3), OrderedDict(i=Bint[3]))
    x = a + b + c
    assert isinstance(x, Contraction)
    assert set(x.inputs) == {'a', 'b', 'i'}

    y = x.reduce(ops.logaddexp, 'i')
    # assert isinstance(y, Reduce)
    assert set(y.inputs) == {'a', 'b'}
    assert_close(x.reduce(ops.logaddexp), y.reduce(ops.logaddexp))
Code example #14
def test_block_vector():
    shape = (10, )
    expected = zeros(shape)
    actual = BlockVector(shape)

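    # A BlockVector is filled in place, entry by entry or slice by slice, and
    # densified with as_tensor(); unassigned positions stay zero.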
    expected[1] = randn(())
    actual[1] = expected[1]

    expected[3:5] = randn((2, ))
    actual[3:5] = expected[3:5]

    assert_close(actual.as_tensor(), expected)
Code example #15
def test_block_vector_batched(batch_shape):
    shape = batch_shape + (10, )
    expected = zeros(shape)
    actual = BlockVector(shape)

    expected[..., 1] = randn(batch_shape)
    actual[..., 1] = expected[..., 1]

    expected[..., 3:5] = randn(batch_shape + (2, ))
    actual[..., 3:5] = expected[..., 3:5]

    assert_close(actual.as_tensor(), expected)
Code example #16
def test_function_matmul():
    @funsor.function(reals(3, 4), reals(4, 5), reals(3, 5))
    def matmul(x, y):
        return x @ y

    check_funsor(matmul, {'x': reals(3, 4), 'y': reals(4, 5)}, reals(3, 5))

    x = Tensor(randn((3, 4)))
    y = Tensor(randn((4, 5)))
    actual = matmul(x, y)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, reals(3, 5), expected_data)
Code example #17
File: test_tensor.py  Project: ordabayevy/funsor
def test_function_matmul():
    @funsor.function(Reals[3, 4], Reals[4, 5], Reals[3, 5])
    def matmul(x, y):
        return x @ y

    check_funsor(matmul, {'x': Reals[3, 4], 'y': Reals[4, 5]}, Reals[3, 5])

    x = Tensor(randn((3, 4)))
    y = Tensor(randn((4, 5)))
    actual = matmul(x, y)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, Reals[3, 5], expected_data)
Code example #18
def test_gamma_gamma_conjugate(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape
    prior = Variable("prior", Real)
    concentration0 = Tensor(ops.exp(randn(full_shape)), inputs)
    rate0 = Tensor(ops.exp(randn(full_shape)), inputs)
    concentration = Tensor(ops.exp(randn(full_shape)), inputs)
    latent = dist.Gamma(concentration0, rate0, value=prior)
    conditional = dist.Gamma(concentration, rate=prior)

    obs = Tensor(ops.exp(randn(full_shape)), inputs)
    _assert_conjugate_density_ok(latent, conditional, obs, prec=0.02)
Code example #19
File: test_tensor.py  Project: ordabayevy/funsor
def test_function_hint_matmul():
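    # Here funsor.function reads the input and output domains from Python type
    # hints instead of explicit decorator arguments.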
    @funsor.function
    def matmul(x: Reals[3, 4], y: Reals[4, 5]) -> Reals[3, 5]:
        return x @ y

    assert get_type_hints(matmul) == get_type_hints(matmul.fn)

    check_funsor(matmul, {'x': Reals[3, 4], 'y': Reals[4, 5]}, Reals[3, 5])

    x = Tensor(randn((3, 4)))
    y = Tensor(randn((4, 5)))
    actual = matmul(x, y)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, Reals[3, 5], expected_data)
Code example #20
def test_function_lazy_matmul():
    @funsor.function(reals(3, 4), reals(4, 5), reals(3, 5))
    def matmul(x, y):
        return x @ y

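    # Applying the wrapped function to a free Variable defers evaluation and
    # returns a lazy funsor.tensor.Function; substituting a concrete Tensor
    # evaluates it.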
    x_lazy = Variable('x', reals(3, 4))
    y = Tensor(randn((4, 5)))
    actual_lazy = matmul(x_lazy, y)
    check_funsor(actual_lazy, {'x': reals(3, 4)}, reals(3, 5))
    assert isinstance(actual_lazy, funsor.tensor.Function)

    x = Tensor(randn((3, 4)))
    actual = actual_lazy(x=x)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, reals(3, 5), expected_data)
Code example #21
def test_advanced_indexing_shape():
    I, J, M, N = 4, 4, 2, 3
    x = Tensor(randn((I, J)), OrderedDict([
        ('i', bint(I)),
        ('j', bint(J)),
    ]))
    m = Tensor(numeric_array([2, 3]), OrderedDict([('m', bint(M))]), I)
    n = Tensor(numeric_array([0, 1, 1]), OrderedDict([('n', bint(N))]), J)
    assert x.data.shape == (I, J)

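    # Substituting the bint-valued tensors m and n performs advanced indexing;
    # substitutions for 'k', which is not an input of x, are no-ops.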
    check_funsor(x(i=m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(i=m, j=n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(i=m, j=n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(i=m, k=m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(i=n), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(i=n, k=m), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(j=m), {'i': bint(I), 'm': bint(M)}, reals())
    check_funsor(x(j=m, i=n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(j=m, i=n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(j=m, k=m), {'i': bint(I), 'm': bint(M)}, reals())
    check_funsor(x(j=n), {'i': bint(I), 'n': bint(N)}, reals())
    check_funsor(x(j=n, k=m), {'i': bint(I), 'n': bint(N)}, reals())
    check_funsor(x(m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(m, j=n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(m, j=n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(m, k=m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(m, n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(m, n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(n), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(n, k=m), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(n, m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(n, m, k=m), {'m': bint(M), 'n': bint(N)}, reals())
Code example #22
def test_dirichlet_multinomial_density(batch_shape, event_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    max_count = 10

    @funsor.function
    def dirichlet_multinomial(concentration: Reals[event_shape], total_count: Real,
                              value: Reals[event_shape]) -> Real:
        return backend_dist.DirichletMultinomial(concentration, total_count).log_prob(value)

    check_funsor(dirichlet_multinomial, {'concentration': Reals[event_shape],
                                         'total_count': Real,
                                         'value': Reals[event_shape]},
                 Real)

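    # value holds integer counts cast to float, and total_count is at least
    # the row sum of value, so the counts are always consistent.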
    concentration = Tensor(ops.exp(randn(batch_shape + event_shape)), inputs)
    value_data = ops.astype(randint(0, max_count, size=batch_shape + event_shape), 'float32')
    total_count_data = value_data.sum(-1) + ops.astype(randint(0, max_count, size=batch_shape), 'float32')
    value = Tensor(value_data, inputs)
    total_count = Tensor(total_count_data, inputs)
    expected = dirichlet_multinomial(concentration, total_count, value)
    check_funsor(expected, inputs, Real)
    actual = dist.DirichletMultinomial(concentration, total_count, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
Code example #23
def test_lambda_getitem():
    data = randn((2, ))
    x = Tensor(data)
    y = Tensor(data, OrderedDict(i=bint(2)))
    i = Variable('i', bint(2))
    assert x[i] is y
    assert Lambda(i, y) is x
Code example #24
def test_getitem_tensor():
    data = randn((5, 4, 3, 2))
    x = Tensor(data)
    i = Variable('i', bint(5))
    j = Variable('j', bint(4))
    k = Variable('k', bint(3))
    m = Variable('m', bint(2))

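    # Each case checks that indexing by a Variable and then substituting a
    # random tensor agrees with indexing directly by that tensor.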
    y = random_tensor(OrderedDict(), bint(5))
    assert_close(x[i](i=y), x[y])

    y = random_tensor(OrderedDict(), bint(4))
    assert_close(x[:, j](j=y), x[:, y])

    y = random_tensor(OrderedDict(), bint(3))
    assert_close(x[:, :, k](k=y), x[:, :, y])

    y = random_tensor(OrderedDict(), bint(2))
    assert_close(x[:, :, :, m](m=y), x[:, :, :, y])

    y = random_tensor(OrderedDict([('i', i.output)]), bint(j.dtype))
    assert_close(x[i, j](j=y), x[i, y])

    y = random_tensor(OrderedDict([('i', i.output), ('j', j.output)]),
                      bint(k.dtype))
    assert_close(x[i, j, k](k=y), x[i, j, y])
Code example #25
def test_to_funsor(shape, dtype):
    t = ops.astype(randn(shape), dtype)
    f = funsor.to_funsor(t)
    assert isinstance(f, Tensor)
    assert funsor.to_funsor(t, reals(*shape)) is f
    with pytest.raises(ValueError):
        funsor.to_funsor(t, reals(5, *shape))
Code example #26
File: test_tensor.py  Project: ordabayevy/funsor
def test_getitem_tensor():
    data = randn((5, 4, 3, 2))
    x = Tensor(data)
    i = Variable('i', Bint[5])
    j = Variable('j', Bint[4])
    k = Variable('k', Bint[3])
    m = Variable('m', Bint[2])

    y = random_tensor(OrderedDict(), Bint[5])
    assert_close(x[i](i=y), x[y])

    y = random_tensor(OrderedDict(), Bint[4])
    assert_close(x[:, j](j=y), x[:, y])

    y = random_tensor(OrderedDict(), Bint[3])
    assert_close(x[:, :, k](k=y), x[:, :, y])

    y = random_tensor(OrderedDict(), Bint[2])
    assert_close(x[:, :, :, m](m=y), x[:, :, :, y])

    y = random_tensor(OrderedDict([('i', i.output)]), Bint[j.dtype])
    assert_close(x[i, j](j=y), x[i, y])

    y = random_tensor(OrderedDict([('i', i.output), ('j', j.output)]),
                      Bint[k.dtype])
    assert_close(x[i, j, k](k=y), x[i, j, y])
Code example #27
File: test_tensor.py  Project: ordabayevy/funsor
def test_advanced_indexing_shape():
    I, J, M, N = 4, 4, 2, 3
    x = Tensor(randn((I, J)), OrderedDict([
        ('i', Bint[I]),
        ('j', Bint[J]),
    ]))
    m = Tensor(numeric_array([2, 3]), OrderedDict([('m', Bint[M])]), I)
    n = Tensor(numeric_array([0, 1, 1]), OrderedDict([('n', Bint[N])]), J)
    assert x.data.shape == (I, J)

    check_funsor(x(i=m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(i=m, j=n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(i=m, j=n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(i=m, k=m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(i=n), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(i=n, k=m), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(j=m), {'i': Bint[I], 'm': Bint[M]}, Real)
    check_funsor(x(j=m, i=n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(j=m, i=n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(j=m, k=m), {'i': Bint[I], 'm': Bint[M]}, Real)
    check_funsor(x(j=n), {'i': Bint[I], 'n': Bint[N]}, Real)
    check_funsor(x(j=n, k=m), {'i': Bint[I], 'n': Bint[N]}, Real)
    check_funsor(x(m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(m, j=n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(m, j=n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(m, k=m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(m, n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(m, n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(n), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(n, k=m), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(n, m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(n, m, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
Code example #28
File: test_tensor.py  Project: ordabayevy/funsor
def test_pickle():
    x = Tensor(randn(2, 3))
    f = io.BytesIO()
    pickle.dump(x, f)
    f.seek(0)
    y = pickle.load(f)
    assert_close(x, y)
Code example #29
def test_mvn_affine_one_var():
    x = Variable('x', Reals[2])
    data = dict(x=Tensor(randn(2)))
    with interpretation(lazy):
        d = to_funsor(random_mvn((), 2), Real)
        d = d(value=2 * x + 1)
    _check_mvn_affine(d, data)
Code example #30
def test_mvn_affine_getitem():
    x = Variable('x', Reals[2, 2])
    data = dict(x=Tensor(randn(2, 2)))
    with interpretation(lazy):
        d = to_funsor(random_mvn((), 2), Real)
        d = d(value=x[0] - x[1])
    _check_mvn_affine(d, data)