Example #1
0
def test_von_mises_probs_density(batch_shape, syntax):
    """Check funsor's VonMises log-density against the backend distribution.

    Builds a wrapped backend ``log_prob`` as the ground truth, then evaluates
    ``dist.VonMises`` either eagerly (concrete value) or lazily (substituting
    the value into a ``Variable``) and asserts both paths agree.

    Parameters (supplied by pytest parametrization):
        batch_shape: tuple of batch sizes; mapped onto dims 'i', 'j', 'k'.
        syntax: 'eager' or 'lazy' evaluation strategy.
    """
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    # Ground truth: wrap the backend log_prob as a funsor function
    # (three real inputs -> one real output).
    @funsor.function(reals(), reals(), reals(), reals())
    def von_mises(loc, concentration, value):
        return backend_dist.VonMises(loc, concentration).log_prob(value)

    check_funsor(von_mises, {
        'concentration': reals(),
        'loc': reals(),
        'value': reals()
    }, reals())

    concentration = Tensor(rand(batch_shape), inputs)
    loc = Tensor(rand(batch_shape), inputs)
    value = Tensor(ops.abs(randn(batch_shape)), inputs)
    expected = von_mises(loc, concentration, value)
    check_funsor(expected, inputs, reals())

    d = Variable('value', reals())
    if syntax == 'eager':
        actual = dist.VonMises(loc, concentration, value)
    elif syntax == 'lazy':
        actual = dist.VonMises(loc, concentration, d)(value=value)
    else:
        # Previously an unknown syntax fell through and raised an opaque
        # NameError on `actual`; fail loudly with the offending value instead.
        raise ValueError("unexpected syntax: {}".format(syntax))
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)
Example #2
0
def test_einsum_categorical(equation):
    """Compare funsor einsum over Categorical factors against opt_einsum.

    Each operand is normalized into a probability table, lifted into an
    ``exp(Categorical.log_prob)`` funsor, and contracted both naively and
    through the optimizer; both results must match the opt_einsum reference.
    """
    backend = get_backend()
    if backend == "jax":
        from funsor.jax.distributions import Categorical
    else:
        from funsor.torch.distributions import Categorical

    inputs, outputs, sizes, operands, _ = make_einsum_example(equation)

    # Normalize each operand along its last dim so rows are valid probabilities.
    normalized = []
    for operand in operands:
        magnitude = ops.abs(operand)
        normalized.append(magnitude / magnitude.sum(-1)[..., None])
    operands = normalized

    expected = opt_einsum.contract(equation, *operands,
                                   backend=BACKEND_TO_EINSUM_BACKEND[backend])

    # Build the lazy ASTs under the reflect interpretation.
    with interpretation(reflect):
        funsor_operands = []
        for inp, operand in zip(inputs, operands):
            probs = Tensor(
                operand,
                inputs=OrderedDict([(d, Bint[sizes[d]]) for d in inp[:-1]]),
            )
            value = Variable(inp[-1], Bint[sizes[inp[-1]]])
            funsor_operands.append(Categorical(probs=probs)(value=value).exp())

        naive_ast = naive_einsum(equation, *funsor_operands)
        optimized_ast = apply_optimizer(naive_ast)

    print(f"Naive expression: {naive_ast}")
    print(f"Optimized expression: {optimized_ast}")
    actual_optimized = reinterpret(optimized_ast)  # eager by default
    actual = naive_einsum(equation, *[reinterpret(f) for f in funsor_operands])

    # Align both results to the declared output dim order before comparing.
    if outputs[0]:
        actual = actual.align(tuple(outputs[0]))
        actual_optimized = actual_optimized.align(tuple(outputs[0]))

    assert_close(actual, actual_optimized, atol=1e-4)

    assert expected.shape == actual.data.shape
    assert_close(expected, actual.data)
    for output in outputs:
        for output_dim in output:
            assert output_dim in actual.inputs
            assert actual.inputs[output_dim].dtype == sizes[output_dim]
Example #3
0
def test_transform_exp(shape):
    """Check change of variables through exp: a Delta at `point` in y-space,
    substituted with y = exp(x), should become a Delta at log(point) in x-space
    carrying the log-abs-det-Jacobian term (sum of log(point))."""
    point = Tensor(ops.abs(randn(shape)))  # positive support, so log is defined
    x = Variable('x', reals(*shape))
    actual = Delta('y', point)(y=ops.exp(x))
    # Hoist point.log(): it is both the pulled-back point and the source
    # of the log-density correction (previously computed twice).
    log_point = point.log()
    expected = Delta('x', log_point, log_point.sum())
    assert_close(actual, expected)
Example #4
0
def allclose(a, b, rtol=1e-05, atol=1e-08):
    """Elementwise closeness test: |a - b| < atol + rtol * |b|.

    Follows the numpy.allclose convention: ``atol`` is the absolute tolerance
    floor and ``rtol`` scales with the magnitude of ``b``. Returns False
    immediately when the operand types differ.

    NOTE(review): the result of the comparison is whatever ``ops`` returns
    (possibly an elementwise boolean array rather than a scalar) — confirm
    callers reduce it if needed.
    """
    if type(a) != type(b):
        return False
    # BUG FIX: the tolerances were swapped (rtol + atol * |b|), making the
    # absolute term 1000x too loose and the relative term 1000x too tight
    # at the default values.
    return ops.abs(a - b) < atol + rtol * ops.abs(b)