Example #1
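Note: all of the snippets in this section are excerpted from the funsor test suite and omit their module-level imports. A minimal sketch of the header most of them assume (exact module paths have moved between funsor releases, so treat this as an approximation rather than a verbatim copy):

from collections import OrderedDict
from functools import reduce

import pytest

import funsor
import funsor.ops as ops
from funsor.domains import Bint, Real, Reals, bint, reals
from funsor.tensor import Tensor
from funsor.terms import Lambda, Number, Stack, Variable
from funsor.testing import assert_close, check_funsor, random_tensor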
def test_reduce_subset(op, reduced_vars):
    reduced_vars = frozenset(reduced_vars)
    x = Variable('x', Bint[2])
    y = Variable('y', Bint[3])
    z = Variable('z', Bint[4])
    f = x * y + z
    dtype = f.dtype
    check_funsor(f, {
        'x': Bint[2],
        'y': Bint[3],
        'z': Bint[4]
    }, Array[dtype, ()])
    if isinstance(op, ops.LogAddExpOp):
        pytest.skip()  # not defined for integers

    with interpretation(sequential):
        actual = f.reduce(op, reduced_vars)
        expected = f
        for v in [x, y, z]:
            if v.name in reduced_vars:
                expected = reduce(op,
                                  [expected(**{v.name: i}) for i in v.output])

    try:
        check_funsor(actual, expected.inputs, expected.output)
    except AssertionError:
        assert type(actual).__origin__ == type(expected).__origin__
        assert actual.inputs == expected.inputs
        assert actual.output.dtype != 'real' and expected.output.dtype != 'real'
        pytest.xfail(reason="bound inference not quite right")

    # TODO check data
    if not reduced_vars:
        assert actual is f
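For intuition about what the loop above computes: reducing a variable out of f folds op over every substitution of that variable. A minimal worked case under the same setup (using ops.add, one of the ops the test is parametrized over):

# Reducing x over Bint[2] folds op across both substitutions of x,
# so with op = ops.add and f = x * y + z:
#   f.reduce(ops.add, {'x'}) == f(x=0) + f(x=1)
#                            == (0 * y + z) + (1 * y + z)
#                            == y + 2 * z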
Example #2
def test_reduce_all(op):
    x = Variable('x', Bint[2])
    y = Variable('y', Bint[3])
    z = Variable('z', Bint[4])
    if isinstance(op, ops.LogAddExpOp):
        pytest.skip()  # not defined for integers

    with interpretation(sequential):
        f = x * y + z
        dtype = f.dtype
        check_funsor(f, {
            'x': Bint[2],
            'y': Bint[3],
            'z': Bint[4]
        }, Array[dtype, ()])
        actual = f.reduce(op)

    with interpretation(sequential):
        values = [
            f(x=i, y=j, z=k) for i in x.output for j in y.output
            for k in z.output
        ]
        expected = reduce(op, values)

    assert actual == expected
Example #3
def test_stack_subs():
    x = Variable('x', reals())
    y = Variable('y', reals())
    z = Variable('z', reals())
    j = Variable('j', bint(3))

    f = Stack('i', (Number(0), x, y * z))
    check_funsor(f, {
        'i': bint(3),
        'x': reals(),
        'y': reals(),
        'z': reals()
    }, reals())

    assert f(i=Number(0, 3)) is Number(0)
    assert f(i=Number(1, 3)) is x
    assert f(i=Number(2, 3)) is y * z
    assert f(i=j) is Stack('j', (Number(0), x, y * z))
    assert f(i='j') is Stack('j', (Number(0), x, y * z))
    assert f.reduce(ops.add, 'i') is Number(0) + x + (y * z)

    assert f(x=0) is Stack('i', (Number(0), Number(0), y * z))
    assert f(y=x) is Stack('i', (Number(0), x, x * z))
    assert f(x=0, y=x) is Stack('i', (Number(0), Number(0), x * z))
    assert f(x=0, y=x, i=Number(2, 3)) is x * z
    assert f(x=0, i=j) is Stack('j', (Number(0), Number(0), y * z))
    assert f(x=0, i='j') is Stack('j', (Number(0), Number(0), y * z))
Example #4
def test_reduce_subset(dims, reduced_vars, op):
    reduced_vars = frozenset(reduced_vars)
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = torch.rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = data.byte()
        dtype = 2
    x = Tensor(data, inputs, dtype)
    actual = x.reduce(op, reduced_vars)
    expected_inputs = OrderedDict(
        (d, bint(sizes[d])) for d in dims if d not in reduced_vars)

    reduced_vars &= frozenset(dims)
    if not reduced_vars:
        assert actual is x
    else:
        if reduced_vars == frozenset(dims):
            if op is ops.logaddexp:
                # work around missing torch.Tensor.logsumexp()
                data = data.reshape(-1).logsumexp(0)
            else:
                data = REDUCE_OP_TO_TORCH[op](data)
        else:
            for pos in reversed(sorted(map(dims.index, reduced_vars))):
                data = REDUCE_OP_TO_TORCH[op](data, pos)
                if op in (ops.min, ops.max):
                    data = data[0]
        check_funsor(actual, expected_inputs, Domain((), dtype))
        assert_close(actual,
                     Tensor(data, expected_inputs, dtype),
                     atol=1e-5,
                     rtol=1e-5)
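REDUCE_OP_TO_TORCH is a lookup table from funsor's torch backend that maps funsor reduction ops to torch reductions. A plausible minimal reconstruction, inferred from how the test calls it (the library's actual table may differ):

import torch
import funsor.ops as ops

# Assumed shape of the table: each entry reduces a whole tensor, or a
# single dim when called as REDUCE_OP_TO_TORCH[op](data, pos) above.
REDUCE_OP_TO_TORCH = {
    ops.add: torch.sum,
    ops.mul: torch.prod,
    ops.and_: torch.all,
    ops.or_: torch.any,
    ops.min: torch.min,  # with a dim, returns (values, indices), hence data[0] above
    ops.max: torch.max,
    ops.logaddexp: torch.logsumexp,  # requires a dim, hence the full-reduction workaround
}

Examples #7 and #18 below use the backend-generic analog, REDUCE_OP_TO_NUMERIC[op](data, dim), where dim=None reduces over all dimensions.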
Example #5
def test_normalize_einsum(equation, plates, backend, einsum_impl):
    inputs, outputs, sizes, operands, funsor_operands = make_einsum_example(equation)

    with interpretation(reflect):
        expr = einsum_impl(equation, *funsor_operands, backend=backend, plates=plates)

    with interpretation(normalize):
        transformed_expr = reinterpret(expr)

    assert isinstance(transformed_expr, Contraction)
    check_funsor(transformed_expr, expr.inputs, expr.output)

    assert all(isinstance(v, (Number, Tensor, Contraction)) for v in transformed_expr.terms)

    with interpretation(normalize):
        transformed_expr2 = reinterpret(transformed_expr)

    assert transformed_expr2 is transformed_expr  # check normalization

    with interpretation(eager):
        actual = reinterpret(transformed_expr)
        expected = reinterpret(expr)

    assert_close(actual, expected, rtol=1e-4)

    actual = eval(quote(expected))  # requires torch, bint
    assert_close(actual, expected)
Example #6
def test_reduce_subset(op, reduced_vars):
    reduced_vars = frozenset(reduced_vars)
    x = Variable('x', bint(2))
    y = Variable('y', bint(3))
    z = Variable('z', bint(4))
    f = x * y + z
    dtype = f.dtype
    check_funsor(f, {
        'x': bint(2),
        'y': bint(3),
        'z': bint(4)
    }, Domain((), dtype))
    if op is ops.logaddexp:
        pytest.skip()

    with interpretation(sequential):
        actual = f.reduce(op, reduced_vars)
        expected = f
        for v in [x, y, z]:
            if v.name in reduced_vars:
                expected = reduce(op,
                                  [expected(**{v.name: i}) for i in v.output])

    try:
        check_funsor(actual, expected.inputs, expected.output)
    except AssertionError:
        assert type(actual).__origin__ == type(expected).__origin__
        assert actual.inputs == expected.inputs
        assert actual.output.dtype != 'real' and expected.output.dtype != 'real'
        pytest.xfail(reason="bound inference not quite right")

    # TODO check data
    if not reduced_vars:
        assert actual is f
Example #7
def test_reduce_subset(dims, reduced_vars, op):
    reduced_vars = frozenset(reduced_vars)
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
        dtype = 2
    x = Tensor(data, inputs, dtype)
    actual = x.reduce(op, reduced_vars)
    expected_inputs = OrderedDict(
        (d, bint(sizes[d])) for d in dims if d not in reduced_vars)

    reduced_vars &= frozenset(dims)
    if not reduced_vars:
        assert actual is x
    else:
        if reduced_vars == frozenset(dims):
            data = REDUCE_OP_TO_NUMERIC[op](data, None)
        else:
            for pos in reversed(sorted(map(dims.index, reduced_vars))):
                data = REDUCE_OP_TO_NUMERIC[op](data, pos)
        check_funsor(actual, expected_inputs, Domain((), dtype))
        assert_close(actual,
                     Tensor(data, expected_inputs, dtype),
                     atol=1e-5,
                     rtol=1e-5)
Example #8
def test_reduce_all(op):
    x = Variable('x', bint(2))
    y = Variable('y', bint(3))
    z = Variable('z', bint(4))
    if op is ops.logaddexp:
        pytest.skip()

    with interpretation(sequential):
        f = x * y + z
        dtype = f.dtype
        check_funsor(f, {
            'x': bint(2),
            'y': bint(3),
            'z': bint(4)
        }, Domain((), dtype))
        actual = f.reduce(op)

    with interpretation(sequential):
        values = [
            f(x=i, y=j, z=k) for i in x.output for j in y.output
            for k in z.output
        ]
        expected = reduce(op, values)

    assert actual == expected
Example #9
def test_subs_lambda():
    z = Variable('z', reals())
    i = Variable('i', bint(5))
    ix = random_tensor(OrderedDict([('i', bint(5))]), reals())
    actual = Lambda(i, z)(z=ix)
    expected = Lambda(i(i='j'), z(z=ix))
    check_funsor(actual, expected.inputs, expected.output)
    assert_close(actual, expected)
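Lambda binds a bounded-integer variable into a new array dimension of the output, which is what makes the substitution above well-typed. A minimal sketch of that behavior in the same older API (bint/reals) as the test:

from funsor.domains import bint, reals
from funsor.terms import Lambda, Variable

i = Variable('i', bint(5))
z = Variable('z', reals())
zi = Lambda(i, z)
assert 'i' not in zi.inputs    # i is bound by the Lambda...
assert zi.output == reals(5)   # ...and becomes an array dimension of size 5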
Example #10
def test_slice_lambda():
    z = Variable('z', Real)
    i = Variable('i', Bint[5])
    j = Variable('j', Bint[7])
    zi = Lambda(i, z)
    zj = Lambda(j, z)
    zij = Lambda(j, zi)
    zj2 = zij[:, i]
    check_funsor(zj2, zj.inputs, zj.output)
Example #11
def test_slice_lambda():
    z = Variable('z', reals())
    i = Variable('i', bint(5))
    j = Variable('j', bint(7))
    zi = Lambda(i, z)
    zj = Lambda(j, z)
    zij = Lambda(j, zi)
    zj2 = zij[:, i]
    check_funsor(zj2, zj.inputs, zj.output)
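Examples #10 and #11 are the same test written against two generations of funsor's domain API: the newer subscripted types (Real, Bint[5], Reals[3, 4]) and the older constructor functions (reals(), bint(5), reals(3, 4)). In funsor versions where both spellings coexist, they denote the same domains, roughly:

from funsor.domains import Bint, Real, Reals, bint, reals

# Rough correspondence between the two domain spellings used across
# these examples (assumes a funsor version exposing both APIs):
assert reals() == Real              # scalar real
assert reals(3, 4) == Reals[3, 4]   # real array of shape (3, 4)
assert bint(5) == Bint[5]           # bounded integer with 5 values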
Example #12
def test_binary_funsor_scalar(symbol, dims, scalar):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, Bint[sizes[d]]) for d in dims)
    data1 = rand(shape) + 0.5
    expected_data = binary_eval(symbol, data1, scalar)

    x1 = Tensor(data1, inputs)
    actual = binary_eval(symbol, x1, scalar)
    check_funsor(actual, inputs, Real, expected_data)
Example #13
def test_binary_scalar_funsor(symbol, dims, scalar):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data1 = rand(shape) + 0.5
    expected_data = binary_eval(symbol, scalar, data1)

    x1 = Tensor(data1, inputs)
    actual = binary_eval(symbol, scalar, x1)
    check_funsor(actual, inputs, reals(), expected_data)
Example #14
def test_unary(symbol, data):
    dtype = 'real'
    if symbol == '~':
        data = bool(data)
        dtype = 2
    expected_data = unary_eval(symbol, data)

    x = Number(data, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
Example #15
def test_sample_subs_smoke():
    x = random_tensor(OrderedDict([('i', bint(3)), ('j', bint(2))]), reals())
    with interpretation(reflect):
        z = x(i=1)
    rng_key = None if get_backend() == "torch" else np.array([0, 1],
                                                             dtype=np.uint32)
    actual = z.sample(frozenset({"j"}),
                      OrderedDict({"i": bint(4)}),
                      rng_key=rng_key)
    check_funsor(actual, {"j": bint(2), "i": bint(4)}, reals())
Example #16
def test_function_matmul():
    @funsor.torch.function(reals(3, 4), reals(4, 5), reals(3, 5))
    def matmul(x, y):
        return torch.matmul(x, y)

    check_funsor(matmul, {'x': reals(3, 4), 'y': reals(4, 5)}, reals(3, 5))

    x = Tensor(torch.randn(3, 4))
    y = Tensor(torch.randn(4, 5))
    actual = matmul(x, y)
    expected_data = torch.matmul(x.data, y.data)
    check_funsor(actual, {}, reals(3, 5), expected_data)
Example #17
def test_unary(symbol, data):
    dtype = 'real'
    if symbol == '~':
        data = bool(data)
        dtype = 2
    if symbol == 'atanh':
        data = min(data, 0.99)
    expected_data = unary_eval(symbol, data)

    x = Number(data, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, {}, Array[dtype, ()], expected_data)
Example #18
def test_reduce_all(dims, op):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = rand(shape) + 0.5
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
    expected_data = REDUCE_OP_TO_NUMERIC[op](data, None)

    x = Tensor(data, inputs)
    actual = x.reduce(op)
    check_funsor(actual, {}, reals(), expected_data)
Example #19
def test_stack_simple():
    x = Number(0.)
    y = Number(1.)
    z = Number(4.)

    xyz = Stack('i', (x, y, z))
    check_funsor(xyz, {'i': bint(3)}, reals())

    assert xyz(i=Number(0, 3)) is x
    assert xyz(i=Number(1, 3)) is y
    assert xyz(i=Number(2, 3)) is z
    assert xyz.reduce(ops.add, 'i') == 5.
Example #20
def test_function_matmul():
    @funsor.function(Reals[3, 4], Reals[4, 5], Reals[3, 5])
    def matmul(x, y):
        return x @ y

    check_funsor(matmul, {'x': Reals[3, 4], 'y': Reals[4, 5]}, Reals[3, 5])

    x = Tensor(randn((3, 4)))
    y = Tensor(randn((4, 5)))
    actual = matmul(x, y)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, Reals[3, 5], expected_data)
Example #21
def test_function_matmul():
    @funsor.function(reals(3, 4), reals(4, 5), reals(3, 5))
    def matmul(x, y):
        return x @ y

    check_funsor(matmul, {'x': reals(3, 4), 'y': reals(4, 5)}, reals(3, 5))

    x = Tensor(randn((3, 4)))
    y = Tensor(randn((4, 5)))
    actual = matmul(x, y)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, reals(3, 5), expected_data)
Example #22
def _get_stat_diff(funsor_dist_class, sample_inputs, inputs, num_samples,
                   statistic, with_lazy, params):
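    # Wraps the raw parameters as funsor Tensors, draws a sample funsor from
    # the distribution, and returns how far a Monte Carlo estimate of the
    # requested statistic (mean/variance/entropy) is from the exact statistic
    # of the backing distribution.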
    params = [Tensor(p, inputs) for p in params]
    if isinstance(with_lazy, bool):
        with interpretation(lazy if with_lazy else eager):
            funsor_dist = funsor_dist_class(*params)
    else:
        funsor_dist = funsor_dist_class(*params)

    rng_key = None if get_backend() == "torch" else np.array([0, 0],
                                                             dtype=np.uint32)
    sample_value = funsor_dist.sample(frozenset(['value']),
                                      sample_inputs,
                                      rng_key=rng_key)
    expected_inputs = OrderedDict(
        tuple(sample_inputs.items()) + tuple(inputs.items()) +
        (('value', funsor_dist.inputs['value']), ))
    check_funsor(sample_value, expected_inputs, reals())

    if sample_inputs:

        actual_mean = Integrate(
            sample_value,
            Variable('value', funsor_dist.inputs['value']),
            frozenset(['value']),
        ).reduce(ops.add, frozenset(sample_inputs))

        inputs, tensors = align_tensors(
            *list(funsor_dist.params.values())[:-1])
        raw_dist = funsor_dist.dist_class(
            **dict(zip(funsor_dist._ast_fields[:-1], tensors)))
        expected_mean = Tensor(raw_dist.mean, inputs)

        if statistic == "mean":
            actual_stat, expected_stat = actual_mean, expected_mean
        elif statistic == "variance":
            actual_stat = Integrate(
                sample_value,
                (Variable('value', funsor_dist.inputs['value']) - actual_mean) ** 2,
                frozenset(['value']),
            ).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.variance, inputs)
        elif statistic == "entropy":
            actual_stat = -Integrate(
                sample_value, funsor_dist, frozenset(['value']),
            ).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.entropy(), inputs)
        else:
            raise ValueError("invalid test statistic")

        diff = (actual_stat.reduce(ops.add).data
                - expected_stat.reduce(ops.add).data)
        return diff.sum(), diff
Example #23
def test_unary(symbol, dims):
    sizes = {'a': 3, 'b': 4}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    dtype = 'real'
    data = torch.rand(shape) + 0.5
    if symbol == '~':
        data = data.byte()
        dtype = 2
    expected_data = unary_eval(symbol, data)

    x = Tensor(data, inputs, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, inputs, funsor.Domain((), dtype), expected_data)
Example #24
def test_function_hint_matmul():
    @funsor.function
    def matmul(x: Reals[3, 4], y: Reals[4, 5]) -> Reals[3, 5]:
        return x @ y

    assert get_type_hints(matmul) == get_type_hints(matmul.fn)

    check_funsor(matmul, {'x': Reals[3, 4], 'y': Reals[4, 5]}, Reals[3, 5])

    x = Tensor(randn((3, 4)))
    y = Tensor(randn((4, 5)))
    actual = matmul(x, y)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, Reals[3, 5], expected_data)
Example #25
def test_sequential_sum_product_adjoint(impl, sum_op, prod_op, batch_inputs,
                                        state_domain, num_steps):
    # test mostly copied from test_sum_product.py
    inputs = OrderedDict(batch_inputs)
    inputs.update(prev=state_domain, curr=state_domain)
    inputs["time"] = bint(num_steps)
    if state_domain.dtype == "real":
        trans = random_gaussian(inputs)
    else:
        trans = random_tensor(inputs)
    time = Variable("time", bint(num_steps))

    with AdjointTape() as actual_tape:
        actual = impl(sum_op, prod_op, trans, time, {"prev": "curr"})

    expected_inputs = batch_inputs.copy()
    expected_inputs.update(prev=state_domain, curr=state_domain)
    assert dict(actual.inputs) == expected_inputs

    # Check against contract.
    operands = tuple(
        trans(time=t, prev="t_{}".format(t), curr="t_{}".format(t + 1))
        for t in range(num_steps))
    reduce_vars = frozenset("t_{}".format(t) for t in range(1, num_steps))
    with AdjointTape() as expected_tape:
        with interpretation(reflect):
            expected = sum_product(sum_op, prod_op, operands, reduce_vars)
        expected = apply_optimizer(expected)
        expected = expected(**{
            "t_0": "prev",
            "t_{}".format(num_steps): "curr"
        })
        expected = expected.align(tuple(actual.inputs.keys()))

    # check forward pass (sanity check)
    assert_close(actual, expected, rtol=5e-4 * num_steps)

    # perform backward passes only after the sanity check
    expected_bwds = expected_tape.adjoint(sum_op, prod_op, expected, operands)
    actual_bwd = actual_tape.adjoint(sum_op, prod_op, actual, (trans, ))[trans]

    # check backward pass
    for t, operand in enumerate(operands):
        actual_bwd_t = actual_bwd(time=t,
                                  prev="t_{}".format(t),
                                  curr="t_{}".format(t + 1))
        expected_bwd = expected_bwds[operand].align(
            tuple(actual_bwd_t.inputs.keys()))
        check_funsor(actual_bwd_t, expected_bwd.inputs, expected_bwd.output)
        assert_close(actual_bwd_t, expected_bwd, rtol=5e-4 * num_steps)
Example #26
def test_function_lazy_matmul():
    @funsor.function(reals(3, 4), reals(4, 5), reals(3, 5))
    def matmul(x, y):
        return x @ y

    x_lazy = Variable('x', reals(3, 4))
    y = Tensor(randn((4, 5)))
    actual_lazy = matmul(x_lazy, y)
    check_funsor(actual_lazy, {'x': reals(3, 4)}, reals(3, 5))
    assert isinstance(actual_lazy, funsor.tensor.Function)

    x = Tensor(randn((3, 4)))
    actual = actual_lazy(x=x)
    expected_data = x.data @ y.data
    check_funsor(actual, {}, reals(3, 5), expected_data)
Example #27
def test_binary(symbol, data1, data2):
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = bool(data1)
        data2 = bool(data2)
    try:
        expected_data = binary_eval(symbol, data1, data2)
    except ZeroDivisionError:
        return

    x1 = Number(data1, dtype)
    x2 = Number(data2, dtype)
    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
Example #28
def test_function_lazy_matmul():
    @funsor.torch.function(reals(3, 4), reals(4, 5), reals(3, 5))
    def matmul(x, y):
        return torch.matmul(x, y)

    x_lazy = Variable('x', reals(3, 4))
    y = Tensor(torch.randn(4, 5))
    actual_lazy = matmul(x_lazy, y)
    check_funsor(actual_lazy, {'x': reals(3, 4)}, reals(3, 5))
    assert isinstance(actual_lazy, funsor.torch.Function)

    x = Tensor(torch.randn(3, 4))
    actual = actual_lazy(x=x)
    expected_data = torch.matmul(x.data, y.data)
    check_funsor(actual, {}, reals(3, 5), expected_data)
Example #29
def test_variable(domain):
    x = Variable('x', domain)
    check_funsor(x, {'x': domain}, domain)
    assert Variable('x', domain) is x
    assert x('x') is x
    y = Variable('y', domain)
    assert x('y') is y
    assert x(x='y') is y
    assert x(x=y) is y
    x4 = Variable('x', bint(4))
    assert x4 is not x
    assert x4('x') is x4
    assert x(y=x4) is x

    xp1 = x + 1.
    assert xp1(x=2.) == 3.
Example #30
def test_reduce_all(dims, op):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = torch.rand(shape) + 0.5
    if op in [ops.and_, ops.or_]:
        data = data.byte()
    if op is ops.logaddexp:
        # work around missing torch.Tensor.logsumexp()
        expected_data = data.reshape(-1).logsumexp(0)
    else:
        expected_data = REDUCE_OP_TO_TORCH[op](data)

    x = Tensor(data, inputs)
    actual = x.reduce(op)
    check_funsor(actual, {}, reals(), expected_data)