Example 1
def test_reduce_moment_matching_multivariate():
    int_inputs = [('i', bint(4))]
    real_inputs = [('x', reals(2))]
    inputs = OrderedDict(int_inputs + real_inputs)
    int_inputs = OrderedDict(int_inputs)
    real_inputs = OrderedDict(real_inputs)

    loc = numeric_array([[-10., -1.], [+10., -1.], [+10., +1.], [-10., +1.]])
    precision = zeros(4, 1, 1) + ops.new_eye(loc, (2, ))
    discrete = Tensor(zeros(4), int_inputs)
    gaussian = Gaussian(loc, precision, inputs)
    gaussian -= gaussian.log_normalizer
    joint = discrete + gaussian
    with interpretation(moment_matching):
        actual = joint.reduce(ops.logaddexp, 'i')
    assert_close(actual.reduce(ops.logaddexp), joint.reduce(ops.logaddexp))

    expected_loc = zeros(2)
    expected_covariance = numeric_array([[101., 0.], [0., 2.]])
    expected_precision = _inverse(expected_covariance)
    expected_gaussian = Gaussian(expected_loc, expected_precision, real_inputs)
    expected_gaussian -= expected_gaussian.log_normalizer
    expected_discrete = Tensor(ops.log(numeric_array(4.)))
    expected = expected_discrete + expected_gaussian
    assert_close(actual, expected, atol=1e-5, rtol=None)
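
Note on the expected values above (a derivation sketch, not part of the test): the joint is an equally weighted mixture of four unit-covariance Gaussians centred at (-10, -1), (+10, -1), (+10, +1) and (-10, +1). The mixture mean is therefore zero, and by the law of total covariance the mixture covariance is the common unit covariance plus the covariance of the component centres,

    Cov[x] = [[1, 0], [0, 1]] + [[100, 0], [0, 1]] = [[101., 0.], [0., 2.]]

matching expected_covariance. Each normalised component carries unit mass, so the total mass is 4 and expected_discrete = log(4).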
Example 2
def test_advanced_indexing_shape():
    I, J, M, N = 4, 4, 2, 3
    x = Tensor(randn((I, J)), OrderedDict([
        ('i', bint(I)),
        ('j', bint(J)),
    ]))
    m = Tensor(numeric_array([2, 3]), OrderedDict([('m', bint(M))]), I)
    n = Tensor(numeric_array([0, 1, 1]), OrderedDict([('n', bint(N))]), J)
    assert x.data.shape == (I, J)

    check_funsor(x(i=m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(i=m, j=n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(i=m, j=n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(i=m, k=m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(i=n), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(i=n, k=m), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(j=m), {'i': bint(I), 'm': bint(M)}, reals())
    check_funsor(x(j=m, i=n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(j=m, i=n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(j=m, k=m), {'i': bint(I), 'm': bint(M)}, reals())
    check_funsor(x(j=n), {'i': bint(I), 'n': bint(N)}, reals())
    check_funsor(x(j=n, k=m), {'i': bint(I), 'n': bint(N)}, reals())
    check_funsor(x(m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(m, j=n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(m, j=n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(m, k=m), {'j': bint(J), 'm': bint(M)}, reals())
    check_funsor(x(m, n), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(m, n, k=m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(n), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(n, k=m), {'j': bint(J), 'n': bint(N)}, reals())
    check_funsor(x(n, m), {'m': bint(M), 'n': bint(N)}, reals())
    check_funsor(x(n, m, k=m), {'m': bint(M), 'n': bint(N)}, reals())
Example 3
def test_advanced_indexing_shape():
    I, J, M, N = 4, 4, 2, 3
    x = Tensor(randn((I, J)), OrderedDict([
        ('i', Bint[I]),
        ('j', Bint[J]),
    ]))
    m = Tensor(numeric_array([2, 3]), OrderedDict([('m', Bint[M])]), I)
    n = Tensor(numeric_array([0, 1, 1]), OrderedDict([('n', Bint[N])]), J)
    assert x.data.shape == (I, J)

    check_funsor(x(i=m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(i=m, j=n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(i=m, j=n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(i=m, k=m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(i=n), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(i=n, k=m), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(j=m), {'i': Bint[I], 'm': Bint[M]}, Real)
    check_funsor(x(j=m, i=n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(j=m, i=n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(j=m, k=m), {'i': Bint[I], 'm': Bint[M]}, Real)
    check_funsor(x(j=n), {'i': Bint[I], 'n': Bint[N]}, Real)
    check_funsor(x(j=n, k=m), {'i': Bint[I], 'n': Bint[N]}, Real)
    check_funsor(x(m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(m, j=n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(m, j=n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(m, k=m), {'j': Bint[J], 'm': Bint[M]}, Real)
    check_funsor(x(m, n), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(m, n, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(n), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(n, k=m), {'j': Bint[J], 'n': Bint[N]}, Real)
    check_funsor(x(n, m), {'m': Bint[M], 'n': Bint[N]}, Real)
    check_funsor(x(n, m, k=m), {'m': Bint[M], 'n': Bint[N]}, Real)
Example 4
def test_smoke(expr, expected_type):
    dx = Delta('x', Tensor(randn(2, 3), OrderedDict([('i', bint(2))])))
    assert isinstance(dx, Delta)

    dy = Delta('y', Tensor(randn(3, 4), OrderedDict([('j', bint(3))])))
    assert isinstance(dy, Delta)

    t = Tensor(randn(2, 3), OrderedDict([('i', bint(2)), ('j', bint(3))]))
    assert isinstance(t, Tensor)

    g = Gaussian(info_vec=numeric_array([[0.0, 0.1, 0.2], [2.0, 3.0, 4.0]]),
                 precision=numeric_array([[[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                           [0.2, 0.3, 1.0]],
                                          [[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                           [0.2, 0.3, 1.0]]]),
                 inputs=OrderedDict([('i', bint(2)), ('x', reals(3))]))
    assert isinstance(g, Gaussian)

    i0 = Number(1, 2)
    assert isinstance(i0, Number)

    x0 = Tensor(numeric_array([0.5, 0.6, 0.7]))
    assert isinstance(x0, Tensor)

    result = eval(expr)
    assert isinstance(result, expected_type)
Example 5
def test_mc_plate_gaussian():
    log_measure = Gaussian(numeric_array([0.]), numeric_array([[1.]]),
                           (('loc', Real),)) + numeric_array(-0.9189)
    integrand = Gaussian(randn((100, 1)) + 3., ones((100, 1, 1)),
                         (('data', Bint[100]), ('loc', Real)))

    rng_key = None if get_backend() != 'jax' else np.array([0, 0], dtype=np.uint32)
    res = Integrate(log_measure.sample('loc', rng_key=rng_key), integrand, 'loc')
    res = res.reduce(ops.mul, 'data')
    assert not ((res == float('inf')) | (res == float('-inf'))).any()
Example 6
def test_smoke(expr, expected_type):
    g1 = Gaussian(info_vec=numeric_array([[0.0, 0.1, 0.2], [2.0, 3.0, 4.0]]),
                  precision=numeric_array([[[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                            [0.2, 0.3, 1.0]],
                                           [[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                            [0.2, 0.3, 1.0]]]),
                  inputs=OrderedDict([('i', bint(2)), ('x', reals(3))]))
    assert isinstance(g1, Gaussian)

    g2 = Gaussian(info_vec=numeric_array([[0.0, 0.1], [2.0, 3.0]]),
                  precision=numeric_array([[[1.0, 0.2], [0.2, 1.0]],
                                           [[1.0, 0.2], [0.2, 1.0]]]),
                  inputs=OrderedDict([('i', bint(2)), ('y', reals(2))]))
    assert isinstance(g2, Gaussian)

    shift = Tensor(numeric_array([-1., 1.]), OrderedDict([('i', bint(2))]))
    assert isinstance(shift, Tensor)

    i0 = Number(1, 2)
    assert isinstance(i0, Number)

    x0 = Tensor(numeric_array([0.5, 0.6, 0.7]))
    assert isinstance(x0, Tensor)

    y0 = Tensor(numeric_array([[0.2, 0.3], [0.8, 0.9]]),
                inputs=OrderedDict([('i', bint(2))]))
    assert isinstance(y0, Tensor)

    result = eval(expr)
    assert isinstance(result, expected_type)
Example 7
def test_reduce_moment_matching_univariate():
    int_inputs = [('i', bint(2))]
    real_inputs = [('x', reals())]
    inputs = OrderedDict(int_inputs + real_inputs)
    int_inputs = OrderedDict(int_inputs)
    real_inputs = OrderedDict(real_inputs)

    p = 0.8
    t = 1.234
    s1, s2, s3 = 2.0, 3.0, 4.0
    loc = numeric_array([[-s1], [s1]])
    precision = numeric_array([[[s2**-2]], [[s3**-2]]])
    info_vec = (precision @ ops.unsqueeze(loc, -1)).squeeze(-1)
    discrete = Tensor(ops.log(numeric_array([1 - p, p])) + t, int_inputs)
    gaussian = Gaussian(info_vec, precision, inputs)
    gaussian -= gaussian.log_normalizer
    joint = discrete + gaussian
    with interpretation(moment_matching):
        actual = joint.reduce(ops.logaddexp, 'i')
    assert_close(actual.reduce(ops.logaddexp), joint.reduce(ops.logaddexp))

    expected_loc = numeric_array([(2 * p - 1) * s1])
    expected_variance = (4 * p * (1 - p) * s1**2 + (1 - p) * s2**2 + p * s3**2)
    expected_precision = numeric_array([[1 / expected_variance]])
    expected_info_vec = (
        expected_precision @ ops.unsqueeze(expected_loc, -1)).squeeze(-1)
    expected_gaussian = Gaussian(expected_info_vec, expected_precision,
                                 real_inputs)
    expected_gaussian -= expected_gaussian.log_normalizer
    expected_discrete = Tensor(numeric_array(t))
    expected = expected_discrete + expected_gaussian
    assert_close(actual, expected, atol=1e-5, rtol=None)
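
Note on expected_loc and expected_variance (a derivation sketch, not part of the test): the joint is a two-component mixture with weights 1 - p and p, means -s1 and +s1, and standard deviations s2 and s3. The mixture mean and the law of total variance give

    E[x]   = (1 - p) * (-s1) + p * s1 = (2 * p - 1) * s1
    Var[x] = (1 - p) * s2**2 + p * s3**2 + 4 * p * (1 - p) * s1**2

which are exactly the expressions used for expected_loc and expected_variance. The component weights sum to one, so the common offset t passes through the reduction unchanged, hence expected_discrete = Tensor(numeric_array(t)).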
Example 8
def test_normal_affine(expr):

    scale = Tensor(numeric_array(0.3), OrderedDict())
    x = Variable('x', Real)
    y = Variable('y', Real)

    expected = dist.Normal(x, scale, y)
    actual = eval(expr)

    assert isinstance(actual, Contraction)
    assert dict(actual.inputs) == dict(expected.inputs), (actual.inputs, expected.inputs)

    for ta, te in zip(actual.terms, expected.terms):
        assert_close(ta.align(tuple(te.inputs)), te)
Example 9
def test_memoize_sample(check_sample):
    if get_backend() == "jax":
        from funsor.jax.distributions import Normal
    else:
        from funsor.torch.distributions import Normal

    rng_keys = (None, None, None) if get_backend() == "torch" \
        else np.array([[0, 1], [0, 2], [0, 3]], dtype=np.uint32)

    with memoize():
        m, s = numeric_array(0.), numeric_array(1.)
        j1 = Normal(m, s, 'x')
        j2 = Normal(m, s, 'x')
        x1 = j1.sample(frozenset({'x'}), rng_key=rng_keys[0])
        x12 = j1.sample(frozenset({'x'}), rng_key=rng_keys[1])
        x2 = j2.sample(frozenset({'x'}), rng_key=rng_keys[2])

    # this assertion now passes
    assert j1 is j2

    # these assertions fail because sample is not memoized
    if check_sample:
        assert x1 is x12
        assert x1 is x2
Example 10
def numbers_to_tensors(*args):
    """
    Convert :class:`~funsor.terms.Number` s to :class:`funsor.tensor.Tensor` s,
    using any provided tensor as a prototype, if available.
    """
    if any(isinstance(x, Number) for x in args):
        prototype = get_default_prototype()
        options = dict(dtype=prototype.dtype)
        for x in args:
            if isinstance(x, Tensor):
                options = dict(dtype=x.data.dtype,
                               device=getattr(x.data, "device", None))
                break
        with ignore_jit_warnings():
            args = tuple(
                Tensor(numeric_array(x.data, **options), dtype=x.dtype
                       ) if isinstance(x, Number) else x for x in args)
    return args
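
A minimal usage sketch of the promotion rule described in the docstring (hypothetical values; assumes a numeric backend such as torch is active):

    x = Tensor(numeric_array([1.0, 2.0]))   # supplies the dtype/device prototype
    y = Number(0.5)
    x2, y2 = numbers_to_tensors(x, y)
    assert isinstance(y2, Tensor)           # the Number is promoted to a Tensor
    assert y2.data.dtype == x.data.dtype    # promotion reuses x's dtype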
Example 11
def test_beta_bernoulli_conjugate(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape
    prior = Variable("prior", Real)
    concentration1 = Tensor(ops.exp(randn(full_shape)), inputs)
    concentration0 = Tensor(ops.exp(randn(full_shape)), inputs)
    latent = dist.Beta(concentration1, concentration0, value=prior)
    conditional = dist.Bernoulli(probs=prior)
    reduced = (latent + conditional).reduce(ops.logaddexp, set(["prior"]))
    assert isinstance(reduced, dist.DirichletMultinomial)
    concentration = stack((concentration0, concentration1), dim=-1)
    assert_close(reduced.concentration, concentration)
    assert_close(reduced.total_count, Tensor(numeric_array(1.)))

    # we need lazy expression for Beta to draw samples from it
    with interpretation(funsor.terms.lazy):
        lazy_latent = dist.Beta(concentration1, concentration0, value=prior)
    obs = Tensor(rand(batch_shape).round(), inputs)
    _assert_conjugate_density_ok(latent, conditional, obs, lazy_latent=lazy_latent)
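
For reference, the conjugacy identity being exercised (standard Beta-Bernoulli marginalisation, written in the test's notation): integrating the Bernoulli likelihood against the Beta(concentration1, concentration0) prior gives

    P(obs = 1) = concentration1 / (concentration1 + concentration0)
    P(obs = 0) = concentration0 / (concentration1 + concentration0)

i.e. a DirichletMultinomial with concentration = stack((concentration0, concentration1), dim=-1) and total_count = 1, which is what the assertions above check.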
Example 12
def test_eager_subs_ground(log_density):
    point1 = Tensor(randn(3))
    point2 = Tensor(randn(3))
    d = Delta('foo', point1, log_density)
    check_funsor(d(foo=point1), {}, Real, numeric_array(float(log_density)))
    check_funsor(d(foo=point2), {}, Real, numeric_array(float('-inf')))
Example 13
def test_delta_delta():
    v = Variable('v', Reals[2])
    point = Tensor(randn(2))
    log_density = Tensor(numeric_array(0.5))
    d = dist.Delta(point, log_density, v)
    assert d is Delta('v', point, log_density)