Example #1
def eager_negate_variable(op, var):
    if var.dtype != "real":
        return None

    const = Number(0.)
    coeffs = ((var, Number(-1, "real")), )
    return Affine(const, coeffs)
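For orientation, the `Affine(const, coeffs)` term built here encodes `const + sum(coeff * var)`. A plain-Python sketch of that encoding (hypothetical helper, not part of funsor):

def affine_eval(const, coeffs, assignment):
    # Evaluate const + sum(coeff * value) over (var, coeff) pairs, with plain floats.
    return const + sum(coeff * assignment[var] for var, coeff in coeffs)

# Negation of x is encoded as const=0., coeffs=((x, -1.),):
assert affine_eval(0., (("x", -1.),), {"x": 3.0}) == -3.0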
Example #2
def model(size, position=0):
    if size == 1:
        name = str(position)
        return Uniform((Delta(name, Number(0, 2)), Delta(name, Number(1, 2))))
    return Uniform(
        model(t, position) + model(size - t, t + position)
        for t in range(1, size))
Example #3
def test_reduce_syntactic_sugar():
    x = Stack("i", (Number(1), Number(2), Number(3)))
    expected = Number(1 + 2 + 3)
    assert x.reduce(ops.add) is expected
    assert x.reduce(ops.add, "i") is expected
    assert x.reduce(ops.add, {"i"}) is expected
    assert x.reduce(ops.add, frozenset(["i"])) is expected
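The sugar under test is that `reduce` accepts a plain string as well as a set or frozenset of variable names. A hedged sketch of the normalization this implies (hypothetical helper, not funsor's actual code):

def as_reduced_vars(reduced_vars):
    # Promote a single name to a one-element frozenset; freeze any other collection.
    if isinstance(reduced_vars, str):
        return frozenset([reduced_vars])
    return frozenset(reduced_vars)

assert as_reduced_vars("i") == as_reduced_vars({"i"}) == frozenset(["i"])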
Example #4
def eager_multinomial(total_count, probs, value):
    # Multinomial.log_prob() supports inhomogeneous total_count only by
    # avoiding passing total_count to the constructor.
    inputs, (total_count, probs, value) = align_tensors(total_count, probs, value)
    shape = broadcast_shape(total_count.shape + (1,), probs.shape, value.shape)
    probs = Tensor(probs.expand(shape), inputs)
    value = Tensor(value.expand(shape), inputs)
    total_count = Number(total_count.max().item())  # Used by distributions validation code.
    return Multinomial.eager_log_prob(total_count=total_count, probs=probs, value=value)
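To illustrate the workaround described in the comment (a hedged, funsor-independent PyTorch snippet): when `total_count` is not passed to the constructor, `Multinomial.log_prob` infers each batch element's count from `value.sum(-1)`, so counts may differ across the batch.

import torch
from torch.distributions import Multinomial

probs = torch.tensor([[0.5, 0.5], [0.2, 0.8]])
value = torch.tensor([[1., 1.], [3., 0.]])  # rows sum to 2 and 3
d = Multinomial(probs=probs, validate_args=False)  # total_count left at its default
logp = d.log_prob(value)  # per-row counts are read off value itself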
Example #5
def test_reduce_syntactic_sugar():
    i = Variable("i", Bint[3])
    x = Stack("i", (Number(1), Number(2), Number(3)))
    expected = Number(1 + 2 + 3)
    assert x.reduce(ops.add) is expected
    assert x.reduce(ops.add, "i") is expected
    assert x.reduce(ops.add, {"i"}) is expected
    assert x.reduce(ops.add, frozenset(["i"])) is expected
    assert x.reduce(ops.add, i) is expected
    assert x.reduce(ops.add, {i}) is expected
    assert x.reduce(ops.add, frozenset([i])) is expected
Example #6
def test_unify_binary():
    with interpretation(lazy):
        pattern = Variable('a', reals()) + Number(2.) * Variable('b', reals())
        expr = Number(1.) + Number(2.) * (Number(3.) - Number(4.))

    subs = unify(pattern, expr)
    assert subs is not False
    print(subs, pattern(**{k.name: v for k, v in subs.items()}))

    with interpretation(unify_interpreter):
        assert unify((pattern, ), (expr, )) is not False
Example #7
def eager_binary(op, lhs, rhs):
    if lhs.dtype != "real" or rhs.dtype != "real":
        return None

    if op is ops.add:
        const = Number(0.)
        coeffs = ((lhs, Number(1.)), (rhs, Number(1.)))
        return Affine(const, coeffs)
    elif op is ops.sub:
        return lhs + -rhs
    return None
Example #8
def test_slice():
    t_slice = Slice("t", 10)

    s_slice = t_slice(t="s")
    assert isinstance(s_slice, Slice)
    assert s_slice.slice == t_slice.slice
    assert s_slice(s="t") is t_slice

    assert t_slice(t=0) is Number(0, 10)
    assert t_slice(t=1) is Number(1, 10)
    assert t_slice(t=2) is Number(2, 10)
    assert t_slice(t=t_slice) is t_slice
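For intuition, `Slice("t", 10)` acts as the identity map on an index of size 10: substituting a concrete index returns that index as `Number(k, 10)`, and substituting the slice into itself is a no-op. A plain-Python analogue:

t_range = list(range(10))                        # stand-in for Slice("t", 10)
assert t_range[2] == 2                           # cf. t_slice(t=2) is Number(2, 10)
assert [t_range[t] for t in t_range] == t_range  # cf. t_slice(t=t_slice) is t_slice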
Example #9
def test_binary(symbol, data1, data2):
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = bool(data1)
        data2 = bool(data2)
    try:
        expected_data = binary_eval(symbol, data1, data2)
    except ZeroDivisionError:
        return

    x1 = Number(data1, dtype)
    x2 = Number(data2, dtype)
    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
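The test leans on a `binary_eval` helper defined elsewhere in the test module; a minimal sketch of what such a helper might look like (hypothetical, shown only to make the example self-contained):

def binary_eval(symbol, x, y):
    # Evaluate "x <symbol> y" for an operator symbol such as "+", "*", or "<".
    return eval(f"x {symbol} y")

assert binary_eval("+", 2, 3) == 5
assert binary_eval("<", 2, 3) is True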
Example #10
    def eager_reduce(self, op, reduced_vars):
        if op is ops.logaddexp:
            if reduced_vars - self.fresh and self.fresh - reduced_vars:
                result = self.eager_reduce(op, reduced_vars & self.fresh) if reduced_vars & self.fresh else self
                if result is not self:
                    result = result.eager_reduce(op, reduced_vars - self.fresh) if reduced_vars - self.fresh else self
                    return result if result is not self else None
                return None

            result_terms, scale = [], Number(0)
            for name, (point, log_density) in self.terms:
                if name in reduced_vars:
                    # XXX obscenely wasteful - need a lazy Zero term
                    if point.inputs:
                        scale += (point == point).all().log()
                    if log_density.inputs:
                        scale += log_density * 0.
                else:
                    result_terms.append((name, (point, log_density)))

            result = Delta(tuple(result_terms)) + scale if result_terms else scale
            return result.reduce(op, reduced_vars - self.fresh)

        if op is ops.add:
            raise NotImplementedError("TODO Implement ops.add to simulate .to_event().")

        return None  # defer to default implementation
Example #11
    def eager_subs(self, subs):
        terms = OrderedDict(self.terms)
        new_terms = terms.copy()
        log_density = Number(0)
        for name, value in subs:
            if isinstance(value, Variable):
                new_terms[value.name] = new_terms.pop(name)
                continue

            if not any(d.dtype == 'real' for side in (value, terms[name][0])
                       for d in side.inputs.values()):
                point, point_log_density = new_terms.pop(name)
                log_density += (value == point).all().log() + point_log_density
                continue

            # Try to invert the substitution.
            soln = solve(value, terms[name][0])
            if soln is None:
                return None  # lazily substitute
            new_name, new_point, point_log_density = soln
            old_point, old_point_density = new_terms.pop(name)
            new_terms[new_name] = (new_point,
                                   old_point_density + point_log_density)

        if not new_terms:
            return log_density
        return Delta(tuple(new_terms.items())) + log_density
Example #12
def test_smoke(expr, expected_type):
    g1 = Gaussian(info_vec=numeric_array([[0.0, 0.1, 0.2], [2.0, 3.0, 4.0]]),
                  precision=numeric_array([[[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                            [0.2, 0.3, 1.0]],
                                           [[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                            [0.2, 0.3, 1.0]]]),
                  inputs=OrderedDict([('i', bint(2)), ('x', reals(3))]))
    assert isinstance(g1, Gaussian)

    g2 = Gaussian(info_vec=numeric_array([[0.0, 0.1], [2.0, 3.0]]),
                  precision=numeric_array([[[1.0, 0.2], [0.2, 1.0]],
                                           [[1.0, 0.2], [0.2, 1.0]]]),
                  inputs=OrderedDict([('i', bint(2)), ('y', reals(2))]))
    assert isinstance(g2, Gaussian)

    shift = Tensor(numeric_array([-1., 1.]), OrderedDict([('i', bint(2))]))
    assert isinstance(shift, Tensor)

    i0 = Number(1, 2)
    assert isinstance(i0, Number)

    x0 = Tensor(numeric_array([0.5, 0.6, 0.7]))
    assert isinstance(x0, Tensor)

    y0 = Tensor(numeric_array([[0.2, 0.3], [0.8, 0.9]]),
                inputs=OrderedDict([('i', bint(2))]))
    assert isinstance(y0, Tensor)

    result = eval(expr)
    assert isinstance(result, expected_type)
Example #13
def test_smoke(expr, expected_type):
    dx = Delta('x', Tensor(torch.randn(2, 3), OrderedDict([('i', bint(2))])))
    assert isinstance(dx, Delta)

    dy = Delta('y', Tensor(torch.randn(3, 4), OrderedDict([('j', bint(3))])))
    assert isinstance(dy, Delta)

    t = Tensor(torch.randn(2, 3), OrderedDict([('i', bint(2)),
                                               ('j', bint(3))]))
    assert isinstance(t, Tensor)

    g = Gaussian(info_vec=torch.tensor([[0.0, 0.1, 0.2], [2.0, 3.0, 4.0]]),
                 precision=torch.tensor([[[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                          [0.2, 0.3, 1.0]],
                                         [[1.0, 0.1, 0.2], [0.1, 1.0, 0.3],
                                          [0.2, 0.3, 1.0]]]),
                 inputs=OrderedDict([('i', bint(2)), ('x', reals(3))]))
    assert isinstance(g, Gaussian)

    i0 = Number(1, 2)
    assert isinstance(i0, Number)

    x0 = Tensor(torch.tensor([0.5, 0.6, 0.7]))
    assert isinstance(x0, Tensor)

    result = eval(expr)
    assert isinstance(result, expected_type)
Example #14
    def eager_reduce(self, op, reduced_vars):
        if op is ops.logaddexp:
            if self.name in reduced_vars:
                return Number(0)  # Deltas are normalized.

        # TODO Implement ops.add to simulate .to_event().

        return None  # defer to default implementation
Example #15
def eager_binary(op, var, other):
    if var.dtype != "real" or other.dtype != "real":
        return None

    if op is ops.add:
        const = other
        coeffs = ((var, Number(1.)), )
        return Affine(const, coeffs)
    elif op is ops.mul:
        const = Number(0.)
        coeffs = ((var, other), )
        return Affine(const, coeffs)
    elif op is ops.sub:
        return var + -other
    elif op is ops.truediv:
        return var * (1. / other)
    return None
Example #16
    def __call__(cls, *args):
        if len(args) > 1:
            assert len(args) == 2 or len(args) == 3
            assert isinstance(args[0], str) and isinstance(args[1], Funsor)
            args = args + (Number(0.), ) if len(args) == 2 else args
            args = (((args[0], (to_funsor(args[1]), to_funsor(args[2]))), ), )
        assert isinstance(args[0], tuple)
        return super().__call__(args[0])
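This metaclass `__call__` normalizes `Delta(name, point)` and `Delta(name, point, log_density)` into a canonical tuple-of-terms form. A plain-Python sketch of the same normalization (with strings and floats standing in for funsors):

def normalize_delta_args(*args):
    if len(args) > 1:
        name, point = args[0], args[1]
        log_density = args[2] if len(args) == 3 else 0.
        args = (((name, (point, log_density)),),)
    return args[0]

assert normalize_delta_args("x", "point") == (("x", ("point", 0.)),)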
Example #17
    def eager_reduce(self, op, reduced_vars):
        if op is ops.logaddexp:
            # Keep mixture parameters lazy.
            mixture_vars = frozenset(k
                                     for k, d in self.gaussian.inputs.items()
                                     if d.dtype != 'real')
            mixture_vars = mixture_vars.union(*(x.point.inputs
                                                for x in self.deltas))
            lazy_vars = reduced_vars & mixture_vars
            reduced_vars -= lazy_vars

            # Integrate out degenerate variables, i.e. drop selected delta.
            deltas = []
            remaining_vars = set(reduced_vars)
            for d in self.deltas:
                if d.name in reduced_vars:
                    remaining_vars.remove(d.name)
                else:
                    deltas.append(d)
            deltas = tuple(deltas)
            reduced_vars = frozenset(remaining_vars)

            # Integrate out delayed discrete variables.
            discrete_vars = reduced_vars.intersection(self.discrete.inputs)
            discrete = self.discrete.reduce(op, discrete_vars)
            reduced_vars -= discrete_vars

            # Integrate out delayed gaussian variables.
            gaussian_vars = reduced_vars.intersection(self.gaussian.inputs)
            gaussian = self.gaussian.reduce(ops.logaddexp, gaussian_vars)
            reduced_vars -= gaussian_vars

            # Scale to account for remaining reduced_vars that were inputs to dropped deltas.
            eager_result = Joint(deltas, discrete)
            if gaussian is not Number(0):
                eager_result += gaussian
            reduced_vars |= lazy_vars.difference(eager_result.inputs)
            lazy_vars = lazy_vars.intersection(eager_result.inputs)
            if reduced_vars:
                eager_result += ops.log(
                    reduce(ops.mul,
                           [self.inputs[v].dtype for v in reduced_vars]))

            # Return a value only if progress has been made.
            if eager_result is self:
                return None  # defer to default implementation
            else:
                return eager_result.reduce(ops.logaddexp, lazy_vars)

        if op is ops.add:
            terms = list(self.deltas) + [self.discrete, self.gaussian]
            for i, term in enumerate(terms):
                terms[i] = term.reduce(ops.add,
                                       reduced_vars.intersection(term.inputs))
            return reduce(ops.add, terms)

        return None  # defer to default implementation
Example #18
def test_reduce_moment_matching_moments():
    x = Variable('x', Reals[2])
    gaussian = random_gaussian(
        OrderedDict([('i', Bint[2]), ('j', Bint[3]), ('x', Reals[2])]))
    with interpretation(moment_matching):
        approx = gaussian.reduce(ops.logaddexp, 'j')
    with interpretation(MonteCarlo(s=Bint[100000])):
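        # The three checks below compare the zeroth, first, and second moments
        # of the moment-matched approximation against the exact mixture.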
        actual = Integrate(approx, Number(1.), 'x')
        expected = Integrate(gaussian, Number(1.), {'j', 'x'})
        assert_close(actual, expected, atol=1e-3, rtol=1e-3)

        actual = Integrate(approx, x, 'x')
        expected = Integrate(gaussian, x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)

        actual = Integrate(approx, x * x, 'x')
        expected = Integrate(gaussian, x * x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)
Example #19
def test_binary(symbol, data1, data2):
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = bool(data1)
        data2 = bool(data2)
    try:
        expected_data = binary_eval(symbol, data1, data2)
    except ZeroDivisionError:
        return

    x1 = Number(data1, dtype)
    x2 = Number(data2, dtype)
    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
    with interpretation(normalize):
        actual_reflect = binary_eval(symbol, x1, x2)
    assert actual.output == actual_reflect.output
Example #20
def test_reduce_moment_matching_moments():
    x = Variable('x', reals(2))
    gaussian = random_gaussian(
        OrderedDict([('i', bint(2)), ('j', bint(3)), ('x', reals(2))]))
    with interpretation(moment_matching):
        approx = gaussian.reduce(ops.logaddexp, 'j')
    with monte_carlo_interpretation(s=bint(100000)):
        actual = Integrate(approx, Number(1.), 'x')
        expected = Integrate(gaussian, Number(1.), {'j', 'x'})
        assert_close(actual, expected, atol=1e-3, rtol=1e-3)

        actual = Integrate(approx, x, 'x')
        expected = Integrate(gaussian, x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)

        actual = Integrate(approx, x * x, 'x')
        expected = Integrate(gaussian, x * x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)
Example #21
def sum_product(sum_op, prod_op, factors, eliminate=frozenset(), plates=frozenset()):
    """
    Performs sum-product contraction of a collection of factors.

    :return: a single contracted Funsor.
    :rtype: :class:`~funsor.terms.Funsor`
    """
    factors = partial_sum_product(sum_op, prod_op, factors, eliminate, plates)
    return reduce(prod_op, factors, Number(UNITS[prod_op]))
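A hedged usage sketch (assuming funsor's top-level `Bint` and `ops` plus the `random_tensor` testing helper): contract two factors sharing a plate `i` and a sum variable `j` down to a single scalar funsor.

from collections import OrderedDict

from funsor import Bint, ops
from funsor.sum_product import sum_product
from funsor.testing import random_tensor

factors = [random_tensor(OrderedDict(i=Bint[2], j=Bint[3])),
           random_tensor(OrderedDict(i=Bint[2]))]
result = sum_product(ops.logaddexp, ops.add, factors,
                     eliminate=frozenset({"i", "j"}),
                     plates=frozenset({"i"}))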
Example #22
def test_advanced_indexing_lazy(output_shape):
    x = Tensor(randn((2, 3, 4) + output_shape),
               OrderedDict([
                   ('i', bint(2)),
                   ('j', bint(3)),
                   ('k', bint(4)),
               ]))
    u = Variable('u', bint(2))
    v = Variable('v', bint(3))
    with interpretation(lazy):
        i = Number(1, 2) - u
        j = Number(2, 3) - v
        k = u + v

    expected_data = empty((2, 3) + output_shape)
    i_data = x.materialize(i).data
    j_data = x.materialize(j).data
    k_data = x.materialize(k).data
    for u in range(2):
        for v in range(3):
            expected_data[u, v] = x.data[i_data[u], j_data[v], k_data[u, v]]
    expected = Tensor(expected_data,
                      OrderedDict([
                          ('u', bint(2)),
                          ('v', bint(3)),
                      ]))

    assert_equiv(expected, x(i, j, k))
    assert_equiv(expected, x(i=i, j=j, k=k))

    assert_equiv(expected, x(i=i, j=j)(k=k))
    assert_equiv(expected, x(j=j, k=k)(i=i))
    assert_equiv(expected, x(k=k, i=i)(j=j))

    assert_equiv(expected, x(i=i)(j=j, k=k))
    assert_equiv(expected, x(j=j)(k=k, i=i))
    assert_equiv(expected, x(k=k)(i=i, j=j))

    assert_equiv(expected, x(i=i)(j=j)(k=k))
    assert_equiv(expected, x(i=i)(k=k)(j=j))
    assert_equiv(expected, x(j=j)(i=i)(k=k))
    assert_equiv(expected, x(j=j)(k=k)(i=i))
    assert_equiv(expected, x(k=k)(i=i)(j=j))
    assert_equiv(expected, x(k=k)(j=j)(i=i))
Example #23
def test_match_binary():
    with interpretation(lazy):
        pattern = Variable('a', reals()) + Number(2.) * Variable('b', reals())
        expr = Number(1.) + Number(2.) * (Number(3.) - Number(4.))

    @match_vars(pattern)
    def expand_2_vars(a, b):
        return a + b + b

    @match(pattern)
    def expand_2_walk(x):
        return x.lhs + x.rhs.rhs + x.rhs.rhs

    eager_val = reinterpret(expr)
    lazy_val = expand_2_vars(expr)
    assert eager_val == reinterpret(lazy_val)

    lazy_val_2 = expand_2_walk(expr)
    assert eager_val == reinterpret(lazy_val_2)
Example #24
def test_unary(symbol, data):
    dtype = 'real'
    if symbol == '~':
        data = bool(data)
        dtype = 2
    expected_data = unary_eval(symbol, data)

    x = Number(data, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
Example #25
def eager_add(op, joint, other):
    # Update with a delayed gaussian random variable.
    subs = tuple(
        (d.name, d.point) for d in joint.deltas if d.name in other.inputs)
    if subs:
        other = Subs(other, subs)
    if joint.gaussian is not Number(0):
        other = joint.gaussian + other
    if not isinstance(other, Gaussian):
        return Joint(joint.deltas, joint.discrete) + other
    return Joint(joint.deltas, joint.discrete, other)
Example #26
def test_unary(symbol, data):
    dtype = 'real'
    if symbol == '~':
        data = bool(data)
        dtype = 2
    if symbol == 'atanh':
        data = min(data, 0.99)
    expected_data = unary_eval(symbol, data)

    x = Number(data, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, {}, Array[dtype, ()], expected_data)
Example #27
def eager_joint(deltas, discrete, gaussian):

    if not isinstance(gaussian, (Number, Tensor, Gaussian)):
        return Joint(deltas, discrete) + gaussian

    if any(not isinstance(d, Delta) for d in deltas):
        new_deltas = []
        for d in deltas:
            if isinstance(d, Delta):
                new_deltas.append(d)
            elif isinstance(d, (Number, Tensor)):
                discrete += d
            else:
                raise ValueError("Invalid component for Joint: {}".format(d))
        return Joint(tuple(new_deltas), discrete) + gaussian

    if isinstance(gaussian, (Number, Tensor)) and gaussian is not Number(0):
        discrete += gaussian
        return Joint(deltas, discrete, Number(0))

    # Demote a Joint to a simpler elementary funsor.
    if not deltas:
        if gaussian is Number(0):
            return discrete
        elif discrete is Number(0):
            return gaussian
    elif len(deltas) == 1:
        if discrete is Number(0) and gaussian is Number(0):
            return deltas[0]

    return None  # defer to default implementation
Example #28
def test_cons_hash():
    assert Variable('x', bint(3)) is Variable('x', bint(3))
    assert Variable('x', reals()) is Variable('x', reals())
    assert Variable('x', reals()) is not Variable('x', bint(3))
    assert Number(0, 3) is Number(0, 3)
    assert Number(0.) is Number(0.)
    assert Number(0.) is not Number(0, 3)
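The `is` / `is not` checks rest on cons hashing: constructors intern their results, so structurally equal terms are the same object. A minimal sketch of the interning idea (not funsor's actual machinery):

_interned = {}

def intern_call(cls, *args):
    # Return a cached instance when cls has been called with these args before.
    key = (cls, args)
    if key not in _interned:
        _interned[key] = cls(*args)
    return _interned[key]

assert intern_call(complex, 1, 2) is intern_call(complex, 1, 2)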
Example #29
def adjoint(expr, targets, start=Number(0.)):

    adjoint_values = defaultdict(lambda: Number(0.))  # log(1) == 0, the multiplicative unit in logspace
    multiplicities = defaultdict(lambda: 0)

    tape_recorder = AdjointTape()
    with interpretation(tape_recorder):
        adjoint_values[reinterpret(expr)] = start

    while tape_recorder.tape:
        output, fn, inputs = tape_recorder.tape.pop()
        in_adjs = adjoint_ops(fn, adjoint_values[output], output, *inputs)
        for v, adjv in in_adjs.items():
            multiplicities[v] += 1
            adjoint_values[v] = adjoint_values[v] + adjv  # product in logspace

    target_adjs = {}
    for v in targets:
        target_adjs[v] = adjoint_values[v] / multiplicities[v]
        if not isinstance(v, Variable):
            target_adjs[v] = target_adjs[v] + v
    return target_adjs
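The while-loop above is ordinary tape-based reverse accumulation, specialized to the logspace semiring (where `+` plays the role of the product). A generic sketch with plain floats, taking each local adjoint contribution to simply be the output's adjoint, as for addition (hypothetical, for orientation only):

tape = [("z", "add", ("x", "y"))]   # (output, op, inputs), recorded on the forward pass
adjoints = {"z": 1.0}               # seed the adjoint of the final output
while tape:
    out, _op, ins = tape.pop()      # walk the tape backwards
    for v in ins:
        adjoints[v] = adjoints.get(v, 0.0) + adjoints[out]

assert adjoints == {"z": 1.0, "x": 1.0, "y": 1.0}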
Example #30
def eager_binary_affine_variable(op, affine, other):
    if op is ops.add:
        const = affine.const
        coeffs = affine.coeffs.copy()
        if other in coeffs:
            coeffs[other] += 1
        else:
            coeffs[other] = Number(1.)
        return Affine(const, tuple(coeffs.items()))

    if op is ops.sub:
        return affine + -other

    return None