def test_sequential_sum_product_bias_2(num_steps, num_sensors, dim):
    """Sequential sum-product over a chain whose observation factor is biased
    per-sensor; the bias is selected each time step by indexing a Gaussian
    over all sensor biases with a discrete sensor id."""
    time = Variable("time", bint(num_steps))
    bias = Variable("bias", reals(num_sensors, dim))
    bias_dist = random_gaussian(OrderedDict([
        ("bias", reals(num_sensors, dim)),
    ]))
    trans = random_gaussian(OrderedDict([
        ("time", bint(num_steps)),
        ("x_prev", reals(dim)),
        ("x_curr", reals(dim)),
    ]))
    obs = random_gaussian(OrderedDict([
        ("time", bint(num_steps)),
        ("x_curr", reals(dim)),
        ("bias", reals(dim)),
    ]))

    # Each time step only a single sensor observes x,
    # and each sensor has a different bias.
    sensor_id = Tensor(torch.arange(num_steps) % 2,
                       OrderedDict(time=bint(num_steps)),
                       dtype=2)

    with interpretation(eager_or_die):
        factor = trans + obs(bias=bias[sensor_id]) + bias_dist
        assert set(factor.inputs) == {"time", "bias", "x_prev", "x_curr"}

        result = sequential_sum_product(ops.logaddexp, ops.add,
                                        factor, time, {"x_prev": "x_curr"})
        # The time dimension is eliminated; the global bias survives.
        assert set(result.inputs) == {"bias", "x_prev", "x_curr"}
def test_add_gaussian_gaussian(lhs_inputs, rhs_inputs):
    """Adding two Gaussian funsors then substituting real values agrees with
    substituting first and adding the resulting tensors."""
    lhs_inputs = OrderedDict(sorted(lhs_inputs.items()))
    rhs_inputs = OrderedDict(sorted(rhs_inputs.items()))
    inputs = lhs_inputs.copy()
    inputs.update(rhs_inputs)
    int_inputs = OrderedDict((k, d) for k, d in inputs.items()
                             if d.dtype != 'real')
    real_inputs = OrderedDict((k, d) for k, d in inputs.items()
                              if d.dtype == 'real')

    g1 = random_gaussian(lhs_inputs)
    g2 = random_gaussian(rhs_inputs)
    # Random points at which to evaluate both sides, one per real input.
    values = {name: random_tensor(int_inputs, domain)
              for name, domain in real_inputs.items()}

    assert_close((g1 + g2)(**values), g1(**values) + g2(**values),
                 atol=1e-4, rtol=None)
def test_sequential_sum_product(impl, sum_op, prod_op, batch_inputs,
                                state_domain, num_steps):
    """Check a sequential sum-product implementation against a naive
    sum_product contraction over explicitly unrolled time steps."""
    inputs = OrderedDict(batch_inputs)
    inputs.update(prev=state_domain, curr=state_domain)
    if num_steps is None:
        # A chain of length one still needs a (trivial) time variable.
        num_steps = 1
    else:
        inputs["time"] = bint(num_steps)
    if state_domain.dtype == "real":
        trans = random_gaussian(inputs)
    else:
        trans = random_tensor(inputs)
    time = Variable("time", bint(num_steps))

    actual = impl(sum_op, prod_op, trans, time, {"prev": "curr"})
    expected_inputs = batch_inputs.copy()
    expected_inputs.update(prev=state_domain, curr=state_domain)
    assert dict(actual.inputs) == expected_inputs

    # Check against contract.
    operands = tuple(
        trans(time=t, prev="t_{}".format(t), curr="t_{}".format(t + 1))
        for t in range(num_steps))
    reduce_vars = frozenset("t_{}".format(t) for t in range(1, num_steps))
    with interpretation(reflect):
        expected = sum_product(sum_op, prod_op, operands, reduce_vars)
    expected = apply_optimizer(expected)
    expected = expected(**{"t_0": "prev",
                           "t_{}".format(num_steps): "curr"})
    expected = expected.align(tuple(actual.inputs.keys()))
    assert_close(actual, expected, rtol=5e-4 * num_steps)
def test_sarkka_bilmes_generic(time_input, global_inputs, local_inputs,
                               num_periods):
    """Generic Sarkka-Bilmes test: lag counts are encoded by 'P' prefixes on
    local input names; partial-window cases xfail until implemented."""
    # Maximum lag for each unprefixed local input, counted from 'P' prefixes.
    lags = {
        kk: reduce(max,
                   [len(re.search("^P*", k).group(0))
                    for k, v in local_inputs if k.strip("P") == kk],
                   0)
        for kk, vv in local_inputs if not kk.startswith("P")
    }
    expected_inputs = dict(global_inputs + tuple(set(
        ((t * "P" + k), v)
        for k, v in local_inputs if not k.startswith("P")
        for t in range(0, lags[k] + 1))))
    trans_inputs = OrderedDict(global_inputs + (time_input,) + local_inputs)
    global_vars = frozenset(k for k, v in global_inputs)

    if any(v.dtype == "real" for v in trans_inputs.values()):
        trans = random_gaussian(trans_inputs)
    else:
        trans = random_tensor(trans_inputs)

    try:
        _check_sarkka_bilmes(trans, expected_inputs, global_vars, num_periods)
    except NotImplementedError as e:
        # Only expected-unimplemented reasons are converted to xfail.
        partial_reasons = ('TODO handle partial windows',)
        if any(reason in e.args[0] for reason in partial_reasons):
            pytest.xfail(reason=e.args[0])
        else:
            raise
def test_joint_shape(sample_inputs, int_event_inputs, real_event_inputs):
    """Sampling subsets of event variables from a discrete+Gaussian joint
    preserves the expected input signature; unimplemented combinations xfail."""
    event_inputs = int_event_inputs + real_event_inputs
    discrete_inputs = OrderedDict(int_event_inputs)
    gaussian_inputs = OrderedDict(event_inputs)
    expected_inputs = OrderedDict(sample_inputs + event_inputs)
    sample_inputs = OrderedDict(sample_inputs)
    event_inputs = OrderedDict(event_inputs)
    t = random_tensor(discrete_inputs)
    g = random_gaussian(gaussian_inputs)
    x = t + g  # Joint(discrete=t, gaussian=g)

    xfail = False
    for num_sampled in range(len(event_inputs)):
        for sampled_vars in itertools.combinations(list(event_inputs),
                                                   num_sampled):
            sampled_vars = frozenset(sampled_vars)
            print('sampled_vars: {}'.format(', '.join(sampled_vars)))
            try:
                y = x.sample(sampled_vars, sample_inputs)
            except NotImplementedError:
                xfail = True
                continue
            if sampled_vars:
                assert dict(y.inputs) == dict(expected_inputs), sampled_vars
            else:
                # Sampling nothing must be a no-op returning the same object.
                assert y is x
    if xfail:
        pytest.xfail(reason='Not implemented')
def test_gaussian_shape(sample_inputs, batch_inputs, event_inputs):
    """Sampling subsets of event variables from a Gaussian preserves the
    expected input signature; supplies a jax PRNG key on non-torch backends."""
    be_inputs = OrderedDict(batch_inputs + event_inputs)
    expected_inputs = OrderedDict(sample_inputs + batch_inputs + event_inputs)
    sample_inputs = OrderedDict(sample_inputs)
    batch_inputs = OrderedDict(batch_inputs)
    event_inputs = OrderedDict(event_inputs)
    x = random_gaussian(be_inputs)
    # torch needs no explicit key; jax requires one, split per call below.
    rng_key = subkey = None if get_backend() == "torch" else np.array(
        [0, 0], dtype=np.uint32)

    xfail = False
    for num_sampled in range(len(event_inputs) + 1):
        for sampled_vars in itertools.combinations(list(event_inputs),
                                                   num_sampled):
            sampled_vars = frozenset(sampled_vars)
            print('sampled_vars: {}'.format(', '.join(sampled_vars)))
            try:
                if rng_key is not None:
                    import jax
                    rng_key, subkey = jax.random.split(rng_key)
                y = x.sample(sampled_vars, sample_inputs, rng_key=subkey)
            except NotImplementedError:
                xfail = True
                continue
            if num_sampled == len(event_inputs):
                assert isinstance(y, (Delta, Contraction))
            if sampled_vars:
                assert dict(y.inputs) == dict(expected_inputs), sampled_vars
            else:
                assert y is x
    if xfail:
        pytest.xfail(reason='Not implemented')
def test_reduce_add(inputs):
    """Reducing a Gaussian over discrete input 'i' with ops.add equals the
    explicit sum of its slices at each value of i."""
    g = random_gaussian(inputs)
    actual = g.reduce(ops.add, 'i')

    slices = [g(i=i) for i in range(g.inputs['i'].dtype)]
    expected = reduce(ops.add, slices)
    assert_close(actual, expected)
def test_gaussian_shape(sample_inputs, batch_inputs, event_inputs):
    """Sampling subsets of event variables from a Gaussian preserves the
    expected input signature; unimplemented combinations xfail."""
    be_inputs = OrderedDict(batch_inputs + event_inputs)
    expected_inputs = OrderedDict(sample_inputs + batch_inputs + event_inputs)
    sample_inputs = OrderedDict(sample_inputs)
    batch_inputs = OrderedDict(batch_inputs)
    event_inputs = OrderedDict(event_inputs)
    x = random_gaussian(be_inputs)

    xfail = False
    for num_sampled in range(len(event_inputs) + 1):
        for sampled_vars in itertools.combinations(list(event_inputs),
                                                   num_sampled):
            sampled_vars = frozenset(sampled_vars)
            print('sampled_vars: {}'.format(', '.join(sampled_vars)))
            try:
                y = x.sample(sampled_vars, sample_inputs)
            except NotImplementedError:
                xfail = True
                continue
            if num_sampled == len(event_inputs):
                # Fully-sampled Gaussians collapse to Delta/Contraction.
                assert isinstance(y, (Delta, Contraction))
            if sampled_vars:
                assert dict(y.inputs) == dict(expected_inputs), sampled_vars
            else:
                assert y is x
    if xfail:
        pytest.xfail(reason='Not implemented')
def test_joint_shape(sample_inputs, int_event_inputs, real_event_inputs):
    """Sampling subsets of event variables from a discrete+Gaussian joint
    preserves the input signature; supplies a jax PRNG key on non-torch
    backends. Unimplemented combinations xfail."""
    event_inputs = int_event_inputs + real_event_inputs
    discrete_inputs = OrderedDict(int_event_inputs)
    gaussian_inputs = OrderedDict(event_inputs)
    expected_inputs = OrderedDict(sample_inputs + event_inputs)
    sample_inputs = OrderedDict(sample_inputs)
    event_inputs = OrderedDict(event_inputs)
    t = random_tensor(discrete_inputs)
    g = random_gaussian(gaussian_inputs)
    x = t + g  # Joint(discrete=t, gaussian=g)
    # torch needs no explicit key; jax requires one, split per call below.
    rng_key = subkey = None if get_backend() == "torch" else np.array(
        [0, 0], dtype=np.uint32)

    xfail = False
    for num_sampled in range(len(event_inputs)):
        for sampled_vars in itertools.combinations(list(event_inputs),
                                                   num_sampled):
            sampled_vars = frozenset(sampled_vars)
            print('sampled_vars: {}'.format(', '.join(sampled_vars)))
            try:
                if rng_key is not None:
                    import jax
                    rng_key, subkey = jax.random.split(rng_key)
                y = x.sample(sampled_vars, sample_inputs, rng_key=subkey)
            except NotImplementedError:
                xfail = True
                continue
            if sampled_vars:
                assert dict(y.inputs) == dict(expected_inputs), sampled_vars
            else:
                assert y is x
    if xfail:
        pytest.xfail(reason='Not implemented')
def test_gaussian_distribution(event_inputs, batch_inputs):
    """Monte Carlo samples from a Gaussian match its exact zeroth and first
    moments; second-moment checks are skipped (quadratic integration TODO)."""
    num_samples = 100000
    sample_inputs = OrderedDict(particle=bint(num_samples))
    be_inputs = OrderedDict(batch_inputs + event_inputs)
    batch_inputs = OrderedDict(batch_inputs)
    event_inputs = OrderedDict(event_inputs)
    sampled_vars = frozenset(event_inputs)
    p = random_gaussian(be_inputs)
    rng_key = None if get_backend() == "torch" else np.array(
        [0, 0], dtype=np.uint32)
    q = p.sample(sampled_vars, sample_inputs, rng_key=rng_key)

    p_vars = sampled_vars
    q_vars = sampled_vars | frozenset(['particle'])
    # Check zeroth moment.
    assert_close(q.reduce(ops.logaddexp, q_vars),
                 p.reduce(ops.logaddexp, p_vars), atol=1e-6)

    for k1, d1 in event_inputs.items():
        x = Variable(k1, d1)
        # Check first moments.
        assert_close(Integrate(q, x, q_vars),
                     Integrate(p, x, p_vars),
                     atol=0.5, rtol=0.2)
        for k2, d2 in event_inputs.items():
            y = Variable(k2, d2)
            # Check second moments.
            continue  # FIXME: Quadratic integration is not supported:
            assert_close(Integrate(q, x * y, q_vars),
                         Integrate(p, x * y, p_vars),
                         atol=1e-2)
def test_reduce_logaddexp(int_inputs, real_inputs):
    """Marginalizing Delta-constrained real variables out of tensor+Gaussian
    state is equivalent to direct substitution of the Delta points."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    t = random_tensor(int_inputs)
    g = random_gaussian(inputs)
    # One random point per real input, each pinned by a Delta below.
    truth = {name: random_tensor(int_inputs, domain)
             for name, domain in real_inputs.items()}

    state = 0
    state += g
    state += t
    for name, point in truth.items():
        with xfail_if_not_implemented():
            state += Delta(name, point)
    actual = state.reduce(ops.logaddexp, frozenset(truth))

    expected = t + g(**truth)
    assert_close(actual, expected,
                 atol=1e-5,
                 rtol=1e-4 if get_backend() == "jax" else 1e-5)
def test_integrate_gaussian(int_inputs, real_inputs):
    """Monte Carlo Integrate of a Gaussian integrand against a Gaussian
    measure approximates the exact Integrate."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    log_measure = random_gaussian(inputs)
    integrand = random_gaussian(inputs)
    reduced_vars = frozenset(real_inputs)

    with monte_carlo_interpretation(particle=bint(10000)):
        approx = Integrate(log_measure, integrand, reduced_vars)
        assert isinstance(approx, Tensor)

    exact = Integrate(log_measure, integrand, reduced_vars)
    assert isinstance(exact, Tensor)
    assert_close(approx, exact, atol=0.1, rtol=0.1)
def test_integrate_gaussian(int_inputs, real_inputs):
    """Integrate against an explicitly sampled Gaussian measure approximates
    the exact Integrate against the original measure."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    log_measure = random_gaussian(inputs)
    integrand = random_gaussian(inputs)
    reduced_vars = frozenset(real_inputs)

    sampled_log_measure = log_measure.sample(
        reduced_vars, OrderedDict(particle=bint(10000)))
    # The particle dimension must also be reduced in the approximation.
    approx = Integrate(sampled_log_measure, integrand,
                       reduced_vars | {'particle'})
    assert isinstance(approx, Tensor)

    exact = Integrate(log_measure, integrand, reduced_vars)
    assert isinstance(exact, Tensor)
    assert_close(approx, exact, atol=0.1, rtol=0.1)
def test_integrate_gaussian(int_inputs, real_inputs):
    """Integrate against a sampled Gaussian measure (with a backend-specific
    PRNG key) approximates the exact Integrate against the original measure."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    log_measure = random_gaussian(inputs)
    integrand = random_gaussian(inputs)
    reduced_vars = frozenset(real_inputs)

    # jax requires an explicit PRNG key; other backends take None.
    rng_key = None if get_backend() != 'jax' else np.array(
        [0, 0], dtype=np.uint32)
    sampled_log_measure = log_measure.sample(
        reduced_vars, OrderedDict(particle=Bint[100000]), rng_key=rng_key)
    approx = Integrate(sampled_log_measure, integrand,
                       reduced_vars | {'particle'})
    assert isinstance(approx, Tensor)

    exact = Integrate(log_measure, integrand, reduced_vars)
    assert isinstance(exact, Tensor)
    assert_close(approx, exact, atol=0.1, rtol=0.1)
def test_reduce_add(inputs):
    """Reducing a Joint (Gaussian + tensor) over discrete input 'i' with
    ops.add equals the explicit sum of its slices at each value of i."""
    int_inputs = OrderedDict((k, d) for k, d in inputs.items()
                             if d.dtype != 'real')
    x = random_gaussian(inputs) + random_tensor(int_inputs)
    assert isinstance(x, Joint)
    actual = x.reduce(ops.add, 'i')

    slices = [x(i=i) for i in range(x.inputs['i'].dtype)]
    expected = reduce(ops.add, slices)
    assert_close(actual, expected, atol=1e-3, rtol=1e-4)
def test_align(int_inputs, real_inputs):
    """Aligning a Gaussian to a reversed input order and back round-trips."""
    inputs1 = OrderedDict(list(sorted(int_inputs.items())) +
                          list(sorted(real_inputs.items())))
    inputs2 = OrderedDict(reversed(inputs1.items()))
    g1 = random_gaussian(inputs1)

    g2 = g1.align(tuple(inputs2))
    assert g2.inputs == inputs2

    g3 = g2.align(tuple(inputs1))
    assert_close(g3, g1)
def test_reduce_logsumexp(int_inputs, real_inputs):
    """Joint logaddexp-reduction over {x, y} agrees with reducing one variable
    at a time, in either order."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    g = random_gaussian(inputs)
    g_xy = g.reduce(ops.logaddexp, frozenset(['x', 'y']))
    assert_close(g_xy,
                 g.reduce(ops.logaddexp, 'x').reduce(ops.logaddexp, 'y'),
                 atol=1e-3, rtol=None)
    assert_close(g_xy,
                 g.reduce(ops.logaddexp, 'y').reduce(ops.logaddexp, 'x'),
                 atol=1e-3, rtol=None)
def test_eager_subs_variable():
    """Substituting variables into a Gaussian renames and swaps inputs,
    carrying each input's domain along with its new name."""
    inputs = OrderedDict([('i', bint(2)), ('x', reals()), ('y', reals(2))])
    g1 = random_gaussian(inputs)

    # Simple rename: x -> z.
    g2 = g1(x='z')
    assert set(g2.inputs) == {'i', 'y', 'z'}

    # Swap: x <-> y; 'x' now carries y's domain.
    g2 = g1(x='y', y='x')
    assert set(g2.inputs) == {'i', 'x', 'y'}
    assert g2.inputs['x'] == reals(2)
def test_reduce_moment_matching_shape(interp):
    """Reducing a Delta+tensor+Gaussian joint under the given interpretation
    removes exactly the reduced inputs from the result's signature."""
    delta = Delta('x', random_tensor(OrderedDict([('h', bint(7))])))
    discrete = random_tensor(OrderedDict(
        [('h', bint(7)), ('i', bint(6)), ('j', bint(5)), ('k', bint(4))]))
    gaussian = random_gaussian(OrderedDict(
        [('k', bint(4)), ('l', bint(3)), ('m', bint(2)),
         ('y', reals()), ('z', reals(2))]))
    reduced_vars = frozenset(['i', 'k', 'l'])
    joint = delta + discrete + gaussian

    with interpretation(interp):
        actual = joint.reduce(ops.logaddexp, reduced_vars)
    assert set(actual.inputs) == set(joint.inputs) - reduced_vars
def test_reduce_logaddexp_gaussian_lazy():
    """Partially reducing a Gaussian+tensor Contraction over a shared discrete
    input yields the same total as reducing everything at once."""
    a = random_gaussian(OrderedDict(i=bint(3), a=reals(2)))
    b = random_tensor(OrderedDict(i=bint(3), b=bint(2)))
    x = a + b
    assert isinstance(x, Contraction)
    assert set(x.inputs) == {'a', 'b', 'i'}

    y = x.reduce(ops.logaddexp, 'i')
    # assert isinstance(y, Reduce)
    assert set(y.inputs) == {'a', 'b'}
    assert_close(x.reduce(ops.logaddexp), y.reduce(ops.logaddexp))
def test_eager_subs_origin(int_inputs, real_inputs):
    """Evaluating a random Gaussian at the origin of all its real inputs
    yields a zero tensor over the remaining discrete inputs."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)
    g = random_gaussian(inputs)

    # Check that Gaussian log density at origin is zero.
    origin = {k: zeros(d.shape) for k, d in real_inputs.items()}
    actual = g(**origin)
    expected_data = zeros(tuple(d.size for d in int_inputs.values()))
    expected = Tensor(expected_data, int_inputs)
    assert_close(actual, expected)
def test_sequential_sum_product_bias_1(num_steps, dim):
    """Sequential sum-product over a chain with a single shared observation
    bias eliminates time while keeping the bias as a global input."""
    time = Variable("time", bint(num_steps))
    bias_dist = random_gaussian(OrderedDict([
        ("bias", reals(dim)),
    ]))
    trans = random_gaussian(OrderedDict([
        ("time", bint(num_steps)),
        ("x_prev", reals(dim)),
        ("x_curr", reals(dim)),
    ]))
    obs = random_gaussian(OrderedDict([
        ("time", bint(num_steps)),
        ("x_curr", reals(dim)),
        ("bias", reals(dim)),
    ]))

    factor = trans + obs + bias_dist
    assert set(factor.inputs) == {"time", "bias", "x_prev", "x_curr"}

    result = sequential_sum_product(ops.logaddexp, ops.add,
                                    factor, time, {"x_prev": "x_curr"})
    assert set(result.inputs) == {"bias", "x_prev", "x_curr"}
def test_sequential_sum_product_adjoint(impl, sum_op, prod_op, batch_inputs,
                                        state_domain, num_steps):
    """Forward and adjoint (backward) passes of a sequential sum-product
    implementation match those of a naive unrolled sum_product contraction."""
    # test mostly copied from test_sum_product.py
    inputs = OrderedDict(batch_inputs)
    inputs.update(prev=state_domain, curr=state_domain)
    inputs["time"] = bint(num_steps)
    if state_domain.dtype == "real":
        trans = random_gaussian(inputs)
    else:
        trans = random_tensor(inputs)
    time = Variable("time", bint(num_steps))

    with AdjointTape() as actual_tape:
        actual = impl(sum_op, prod_op, trans, time, {"prev": "curr"})
    expected_inputs = batch_inputs.copy()
    expected_inputs.update(prev=state_domain, curr=state_domain)
    assert dict(actual.inputs) == expected_inputs

    # Check against contract.
    operands = tuple(
        trans(time=t, prev="t_{}".format(t), curr="t_{}".format(t + 1))
        for t in range(num_steps))
    reduce_vars = frozenset("t_{}".format(t) for t in range(1, num_steps))
    with AdjointTape() as expected_tape:
        with interpretation(reflect):
            expected = sum_product(sum_op, prod_op, operands, reduce_vars)
        expected = apply_optimizer(expected)
        expected = expected(**{"t_0": "prev",
                               "t_{}".format(num_steps): "curr"})
        expected = expected.align(tuple(actual.inputs.keys()))

    # check forward pass (sanity check)
    assert_close(actual, expected, rtol=5e-4 * num_steps)

    # perform backward passes only after the sanity check
    expected_bwds = expected_tape.adjoint(sum_op, prod_op, expected, operands)
    actual_bwd = actual_tape.adjoint(sum_op, prod_op, actual, (trans,))[trans]

    # check backward pass
    for t, operand in enumerate(operands):
        actual_bwd_t = actual_bwd(time=t,
                                  prev="t_{}".format(t),
                                  curr="t_{}".format(t + 1))
        expected_bwd = expected_bwds[operand].align(
            tuple(actual_bwd_t.inputs.keys()))
        check_funsor(actual_bwd_t, expected_bwd.inputs, expected_bwd.output)
        assert_close(actual_bwd_t, expected_bwd, rtol=5e-4 * num_steps)
def test_add_gaussian_number(int_inputs, real_inputs):
    """Gaussian +/- Number commutes with substitution of real values."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    g = random_gaussian(inputs)
    n = Number(1.234)
    values = {name: random_tensor(int_inputs, domain)
              for name, domain in real_inputs.items()}

    assert_close((g + n)(**values), g(**values) + n, atol=1e-5, rtol=1e-5)
    assert_close((n + g)(**values), n + g(**values), atol=1e-5, rtol=1e-5)
    assert_close((g - n)(**values), g(**values) - n, atol=1e-5, rtol=1e-5)
def test_add_gaussian_tensor(int_inputs, real_inputs):
    """Gaussian +/- Tensor commutes with substitution of real values."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)

    g = random_gaussian(inputs)
    t = random_tensor(int_inputs, reals())
    values = {name: random_tensor(int_inputs, domain)
              for name, domain in real_inputs.items()}

    assert_close((g + t)(**values), g(**values) + t, atol=1e-5, rtol=1e-5)
    assert_close((t + g)(**values), t + g(**values), atol=1e-5, rtol=1e-5)
    assert_close((g - t)(**values), g(**values) - t, atol=1e-5, rtol=1e-5)
def test_sequential_sum_product_multi(impl, x_domain, y_domain, batch_inputs,
                                      num_steps):
    """Sequential sum-product with two state variables (x, y) under
    moment_matching produces the expected input signature; the unrolled
    contraction is also computed for comparison."""
    sum_op = ops.logaddexp
    prod_op = ops.add
    inputs = OrderedDict(batch_inputs)
    inputs.update(x_prev=x_domain, x_curr=x_domain,
                  y_prev=y_domain, y_curr=y_domain)
    if num_steps is None:
        num_steps = 1
    else:
        inputs["time"] = bint(num_steps)
    if any(v.dtype == "real" for v in inputs.values()):
        trans = random_gaussian(inputs)
    else:
        trans = random_tensor(inputs)
    time = Variable("time", bint(num_steps))
    step = {"x_prev": "x_curr", "y_prev": "y_curr"}

    with interpretation(moment_matching):
        actual = impl(sum_op, prod_op, trans, time, step)
    expected_inputs = batch_inputs.copy()
    expected_inputs.update(x_prev=x_domain, x_curr=x_domain,
                           y_prev=y_domain, y_curr=y_domain)
    assert dict(actual.inputs) == expected_inputs

    # Check against contract.
    operands = tuple(
        trans(time=t,
              x_prev="x_{}".format(t), x_curr="x_{}".format(t + 1),
              y_prev="y_{}".format(t), y_curr="y_{}".format(t + 1))
        for t in range(num_steps))
    reduce_vars = frozenset(
        "x_{}".format(t) for t in range(1, num_steps)).union(
        "y_{}".format(t) for t in range(1, num_steps))
    expected = sum_product(sum_op, prod_op, operands, reduce_vars)
    expected = expected(**{
        "x_0": "x_prev", "x_{}".format(num_steps): "x_curr",
        "y_0": "y_prev", "y_{}".format(num_steps): "y_curr",
    })
    expected = expected.align(tuple(actual.inputs.keys()))
    # NOTE(review): no final assert_close(actual, expected) here —
    # presumably because moment_matching is only approximate; confirm.
def test_reduce_moment_matching_finite():
    """Moment-matching reduction stays finite even when most discrete weights
    are -inf (smoke test: the reduce call must not raise)."""
    delta = Delta('x', random_tensor(OrderedDict([('h', bint(7))])))
    discrete = random_tensor(OrderedDict(
        [('i', bint(6)), ('j', bint(5)), ('k', bint(3))]))
    gaussian = random_gaussian(OrderedDict(
        [('k', bint(3)), ('l', bint(2)), ('y', reals()), ('z', reals(2))]))

    # Zero out (in log space) all but the first row and column.
    discrete.data[1:, :] = -float('inf')
    discrete.data[:, 1:] = -float('inf')

    reduced_vars = frozenset(['j', 'k'])
    joint = delta + discrete + gaussian
    with interpretation(moment_matching):
        joint.reduce(ops.logaddexp, reduced_vars)
def test_reduce_moment_matching_shape(interp):
    """Reducing a Delta+tensor+Gaussian joint removes exactly the reduced
    inputs, and marginalizing the real variables afterwards agrees with
    marginalizing everything from the original joint."""
    delta = Delta('x', random_tensor(OrderedDict([('h', Bint[7])])))
    discrete = random_tensor(OrderedDict(
        [('h', Bint[7]), ('i', Bint[6]), ('j', Bint[5]), ('k', Bint[4])]))
    gaussian = random_gaussian(OrderedDict(
        [('k', Bint[4]), ('l', Bint[3]), ('m', Bint[2]),
         ('y', Real), ('z', Reals[2])]))
    reduced_vars = frozenset(['i', 'k', 'l'])
    real_vars = frozenset(k for k, d in gaussian.inputs.items()
                          if d.dtype == "real")
    joint = delta + discrete + gaussian

    with interpretation(interp):
        actual = joint.reduce(ops.logaddexp, reduced_vars)
    assert set(actual.inputs) == set(joint.inputs) - reduced_vars
    assert_close(actual.reduce(ops.logaddexp, real_vars),
                 joint.reduce(ops.logaddexp, real_vars | reduced_vars))
def test_reduce_moment_matching_moments():
    """Moment-matching reduction over 'j' preserves the Gaussian's zeroth,
    first, and second moments, estimated by Monte Carlo integration."""
    x = Variable('x', Reals[2])
    gaussian = random_gaussian(OrderedDict(
        [('i', Bint[2]), ('j', Bint[3]), ('x', Reals[2])]))
    with interpretation(moment_matching):
        approx = gaussian.reduce(ops.logaddexp, 'j')

    with interpretation(MonteCarlo(s=Bint[100000])):
        actual = Integrate(approx, Number(1.), 'x')
        expected = Integrate(gaussian, Number(1.), {'j', 'x'})
        assert_close(actual, expected, atol=1e-3, rtol=1e-3)

        actual = Integrate(approx, x, 'x')
        expected = Integrate(gaussian, x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)

        actual = Integrate(approx, x * x, 'x')
        expected = Integrate(gaussian, x * x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)
def test_reduce_moment_matching_moments():
    """Moment-matching reduction over 'j' preserves the Gaussian's zeroth,
    first, and second moments, estimated by Monte Carlo integration."""
    x = Variable('x', reals(2))
    gaussian = random_gaussian(OrderedDict(
        [('i', bint(2)), ('j', bint(3)), ('x', reals(2))]))
    with interpretation(moment_matching):
        approx = gaussian.reduce(ops.logaddexp, 'j')

    with monte_carlo_interpretation(s=bint(100000)):
        actual = Integrate(approx, Number(1.), 'x')
        expected = Integrate(gaussian, Number(1.), {'j', 'x'})
        assert_close(actual, expected, atol=1e-3, rtol=1e-3)

        actual = Integrate(approx, x, 'x')
        expected = Integrate(gaussian, x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)

        actual = Integrate(approx, x * x, 'x')
        expected = Integrate(gaussian, x * x, {'j', 'x'})
        assert_close(actual, expected, atol=1e-2, rtol=1e-2)