def test_cat_simple(output):
    """Cat should be the identity on a single operand and concatenate
    along the named input dim, unioning the remaining inputs."""
    x = random_tensor(OrderedDict([('i', bint(2))]), output)
    y = random_tensor(OrderedDict([('i', bint(3)), ('j', bint(4))]), output)
    z = random_tensor(OrderedDict([('i', bint(5)), ('k', bint(6))]), output)

    # Cat of a single operand returns that operand unchanged.
    assert Cat('i', (x, )) is x
    assert Cat('i', (y, )) is y
    assert Cat('i', (z, )) is z

    xy = Cat('i', (x, y))
    assert isinstance(xy, Tensor)
    assert xy.inputs == OrderedDict([('i', bint(2 + 3)), ('j', bint(4))])
    assert xy.output == output

    xyz = Cat('i', (x, y, z))
    assert isinstance(xyz, Tensor)
    assert xyz.inputs == OrderedDict([
        ('i', bint(2 + 3 + 5)), ('j', bint(4)), ('k', bint(6)),
    ])
    # BUG FIX: previously re-checked xy.output; xyz.output was never verified.
    assert xyz.output == output
def test_binomial_density(batch_shape, eager):
    """Check funsor's Binomial density against a funsor-wrapped
    torch.distributions.Binomial log_prob, both eagerly and lazily."""
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))
    max_count = 10

    # Reference implementation: wrap the backend log_prob as a funsor function.
    @funsor.torch.function(reals(), reals(), reals(), reals())
    def binomial(total_count, probs, value):
        return torch.distributions.Binomial(total_count, probs).log_prob(value)

    check_funsor(binomial, {'total_count': reals(), 'probs': reals(), 'value': reals()}, reals())

    # Construct total_count = value + extra so that value <= total_count holds.
    value_data = random_tensor(inputs, bint(max_count)).data.float()
    total_count_data = value_data + random_tensor(inputs, bint(max_count)).data.float()
    value = Tensor(value_data, inputs)
    total_count = Tensor(total_count_data, inputs)
    probs = Tensor(torch.rand(batch_shape), inputs)
    expected = binomial(total_count, probs, value)
    check_funsor(expected, inputs, reals())

    # Eager: evaluate directly. Lazy: build with a free Variable, then substitute.
    m = Variable('value', reals())
    actual = dist.Binomial(total_count, probs, value) if eager else \
        dist.Binomial(total_count, probs, m)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)
def test_cat_simple(output):
    """Cat should be the identity on a single operand and concatenate
    along the named input dim, unioning the remaining inputs."""
    x = random_tensor(OrderedDict([('i', Bint[2])]), output)
    y = random_tensor(OrderedDict([('i', Bint[3]), ('j', Bint[4])]), output)
    z = random_tensor(OrderedDict([('i', Bint[5]), ('k', Bint[6])]), output)

    # Cat of a single operand returns that operand unchanged.
    assert Cat('i', (x, )) is x
    assert Cat('i', (y, )) is y
    assert Cat('i', (z, )) is z

    xy = Cat('i', (x, y))
    assert isinstance(xy, Tensor)
    assert xy.inputs == OrderedDict([('i', Bint[2 + 3]), ('j', Bint[4])])
    assert xy.output == output

    xyz = Cat('i', (x, y, z))
    assert isinstance(xyz, Tensor)
    assert xyz.inputs == OrderedDict([
        ('i', Bint[2 + 3 + 5]), ('j', Bint[4]), ('k', Bint[6]),
    ])
    # BUG FIX: previously re-checked xy.output; xyz.output was never verified.
    assert xyz.output == output
def test_reduce_logaddexp(int_inputs, real_inputs):
    """Reducing (tensor + gaussian + deltas) over the delta variables should
    equal the tensor plus the gaussian evaluated at the delta points."""
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = OrderedDict(list(int_inputs.items()) + list(real_inputs.items()))

    t = random_tensor(int_inputs)
    g = random_gaussian(inputs)
    truth = {}
    for name, domain in real_inputs.items():
        truth[name] = random_tensor(int_inputs, domain)

    joint = 0 + g + t
    for name, point in truth.items():
        with xfail_if_not_implemented():
            joint = joint + Delta(name, point)

    actual = joint.reduce(ops.logaddexp, frozenset(truth))
    expected = t + g(**truth)
    rtol = 1e-4 if get_backend() == "jax" else 1e-5
    assert_close(actual, expected, atol=1e-5, rtol=rtol)
def test_independent():
    """Independent should fold the diag dim 'i' into a leading axis of the
    reals var, consistent with explicit substitution plus sum-reduction."""
    f = Variable('x_i', reals(4, 5)) + random_tensor(OrderedDict(i=bint(3)))
    assert f.inputs['x_i'] == reals(4, 5)
    assert f.inputs['i'] == bint(3)

    actual = Independent(f, 'x', 'i', 'x_i')
    assert actual.inputs['x'] == reals(3, 4, 5)
    assert 'i' not in actual.inputs

    x = Variable('x', reals(3, 4, 5))
    expected = f(x_i=x['i']).reduce(ops.add, 'i')
    assert actual.inputs == expected.inputs
    assert actual.output == expected.output

    point = random_tensor(OrderedDict(), x.output)
    assert_close(actual(point), expected(point), atol=1e-5, rtol=1e-5)

    # Renaming the reals var should preserve Independent-ness.
    renamed = actual(x='y')
    assert isinstance(renamed, Independent)
    assert_close(renamed(y=point), expected(x=point), atol=1e-5, rtol=1e-5)

    # Ensure it's ok for .reals_var and .diag_var to be the same.
    renamed = actual(x='x_i')
    assert isinstance(renamed, Independent)
    assert_close(renamed(x_i=point), expected(x=point), atol=1e-5, rtol=1e-5)
def test_subs_reduce():
    """Substituting into a lazily-built reduce should agree with substituting
    first (under a renamed index) and then reducing."""
    base = random_tensor(OrderedDict([('i', bint(3)), ('j', bint(2))]), reals())
    idx = random_tensor(OrderedDict([('i', bint(3))]), bint(2))
    idx2 = idx(i='i2')

    # Build the reduction lazily, then substitute into it.
    with interpretation(reflect):
        actual = base.reduce(ops.add, frozenset({"i"}))
    actual = actual(j=idx)

    # Substitute first (avoiding name capture via 'i2'), then reduce.
    expected = base(j=idx2).reduce(ops.add, frozenset({"i"}))(i2='i')
    assert_close(actual, expected)
def test_normal_independent():
    """Sampling an Independent Normal should yield a Contraction whose 'z'
    input is the full vector domain."""
    loc = random_tensor(OrderedDict(), reals(2))
    scale = random_tensor(OrderedDict(), reals(2)).exp()

    component = dist.Normal(loc['i'], scale['i'], value='z_i')
    assert component.inputs['z_i'] == reals()

    joint = Independent(component, 'z', 'i', 'z_i')
    assert joint.inputs['z'] == reals(2)

    drawn = joint.sample(frozenset(['z']))
    assert isinstance(drawn, Contraction)
    assert drawn.inputs['z'] == reals(2)
def test_normal_independent():
    """Sampling an Independent Normal should yield a Contraction whose 'z'
    input is the full vector domain (backend-aware rng_key handling)."""
    loc = random_tensor(OrderedDict(), Reals[2])
    scale = ops.exp(random_tensor(OrderedDict(), Reals[2]))

    component = dist.Normal(loc['i'], scale['i'], value='z_i')
    assert component.inputs['z_i'] == Real

    joint = Independent(component, 'z', 'i', 'z_i')
    assert joint.inputs['z'] == Reals[2]

    # JAX requires an explicit PRNG key; torch does not.
    if get_backend() == "torch":
        rng_key = None
    else:
        rng_key = np.array([0, 0], dtype=np.uint32)
    drawn = joint.sample(frozenset(['z']), rng_key=rng_key)
    assert isinstance(drawn, Contraction)
    assert drawn.inputs['z'] == Reals[2]
def test_reduce_moment_matching_shape(interp):
    """Reducing a delta+discrete+gaussian joint should drop exactly the
    reduced vars from the result's inputs."""
    delta = Delta('x', random_tensor(OrderedDict([('h', bint(7))])))
    discrete = random_tensor(OrderedDict(
        [('h', bint(7)), ('i', bint(6)), ('j', bint(5)), ('k', bint(4))]))
    gaussian = random_gaussian(OrderedDict(
        [('k', bint(4)), ('l', bint(3)), ('m', bint(2)),
         ('y', reals()), ('z', reals(2))]))

    reduced_vars = frozenset({'i', 'k', 'l'})
    joint = delta + discrete + gaussian
    with interpretation(interp):
        actual = joint.reduce(ops.logaddexp, reduced_vars)

    # Only the reduced names disappear; everything else survives.
    assert set(actual.inputs) == set(joint.inputs) - reduced_vars
def test_syntactic_sugar():
    """Integrate should accept the reduced variable spelled as a string, a
    set, a frozenset, a Variable, or collections of Variables."""
    i = Variable("i", bint(3))
    log_measure = random_tensor(OrderedDict(i=bint(3)))
    integrand = random_tensor(OrderedDict(i=bint(3)))
    expected = (log_measure.exp() * integrand).reduce(ops.add, "i")

    # Every spelling below denotes the same reduced-variable set {i}.
    for spelling in ["i", {"i"}, frozenset(["i"]), i, {i}, frozenset([i])]:
        assert_close(Integrate(log_measure, integrand, spelling), expected)
def test_advanced_indexing_tensor(output_shape):
    """Advanced indexing with funsor-valued indices should match element-wise
    gathering, regardless of how the substitutions are grouped or ordered.

    Dependency structure of the index funsors:
        u   v
       / \ / \
      i   j   k
       \  |  /
        \ | /
          x
    """
    output = reals(*output_shape)
    x = random_tensor(OrderedDict([
        ('i', bint(2)), ('j', bint(3)), ('k', bint(4)),
    ]), output)
    # Index funsors: i depends on u; j on (v, u); k on v.
    i = random_tensor(OrderedDict([('u', bint(5))]), bint(2))
    j = random_tensor(OrderedDict([('v', bint(6)), ('u', bint(5))]), bint(3))
    k = random_tensor(OrderedDict([('v', bint(6))]), bint(4))

    # Reference: explicit element-wise gather over (u, v).
    expected_data = empty((5, 6) + output_shape)
    for u in range(5):
        for v in range(6):
            expected_data[u, v] = x.data[i.data[u], j.data[v, u], k.data[v]]
    expected = Tensor(expected_data, OrderedDict([('u', bint(5)), ('v', bint(6))]))

    # All groupings/orderings of the same substitution must agree.
    assert_equiv(expected, x(i, j, k))
    assert_equiv(expected, x(i=i, j=j, k=k))
    assert_equiv(expected, x(i=i, j=j)(k=k))
    assert_equiv(expected, x(j=j, k=k)(i=i))
    assert_equiv(expected, x(k=k, i=i)(j=j))
    assert_equiv(expected, x(i=i)(j=j, k=k))
    assert_equiv(expected, x(j=j)(k=k, i=i))
    assert_equiv(expected, x(k=k)(i=i, j=j))
    assert_equiv(expected, x(i=i)(j=j)(k=k))
    assert_equiv(expected, x(i=i)(k=k)(j=j))
    assert_equiv(expected, x(j=j)(i=i)(k=k))
    assert_equiv(expected, x(j=j)(k=k)(i=i))
    assert_equiv(expected, x(k=k)(i=i)(j=j))
    assert_equiv(expected, x(k=k)(j=j)(i=i))
def test_matmul(inputs1, inputs2, output_shape1, output_shape2):
    """Matmul of funsor tensors with distinct batch inputs should broadcast
    and agree with backend matmul on a concrete slice."""
    sizes = {'a': 6, 'b': 7, 'c': 8}
    inputs1 = OrderedDict((k, bint(sizes[k])) for k in inputs1)
    inputs2 = OrderedDict((k, bint(sizes[k])) for k in inputs2)
    x1 = random_tensor(inputs1, reals(*output_shape1))
    # BUG FIX: x2 was built from inputs1, so inputs2 was never exercised
    # and input-broadcasting of @ went untested.
    x2 = random_tensor(inputs2, reals(*output_shape2))

    actual = x1 @ x2
    assert actual.output == find_domain(ops.matmul, x1.output, x2.output)

    # Evaluate at one concrete batch point and compare against raw data.
    block = {'a': 1, 'b': 2, 'c': 3}
    actual_block = actual(**block)
    expected_block = Tensor(x1(**block).data @ x2(**block).data)
    assert_close(actual_block, expected_block, atol=1e-5, rtol=1e-5)
def test_add_gaussian_tensor():
    pass
def test_binary_broadcast(inputs1, inputs2, output_shape1, output_shape2):
    """Binary + on funsor tensors with distinct batch inputs should broadcast
    and agree with backend addition on a concrete slice."""
    sizes = {'a': 4, 'b': 5, 'c': 6}
    inputs1 = OrderedDict((k, bint(sizes[k])) for k in inputs1)
    inputs2 = OrderedDict((k, bint(sizes[k])) for k in inputs2)
    x1 = random_tensor(inputs1, reals(*output_shape1))
    # BUG FIX: x2 was built from inputs1, so inputs2 was never exercised
    # and input-broadcasting of + went untested.
    x2 = random_tensor(inputs2, reals(*output_shape2))

    actual = x1 + x2
    assert actual.output == find_domain(ops.add, x1.output, x2.output)

    # Evaluate at one concrete batch point and compare against raw data.
    block = {'a': 1, 'b': 2, 'c': 3}
    actual_block = actual(**block)
    expected_block = Tensor(x1(**block).data + x2(**block).data)
    assert_close(actual_block, expected_block)
def test_lognormal_distribution(moment):
    """Monte Carlo moments of a funsor LogNormal should match empirical
    moments of backend LogNormal samples."""
    num_samples = 100000
    inputs = OrderedDict(batch=bint(10))
    loc = random_tensor(inputs)
    scale = random_tensor(inputs).exp()  # exp ensures positivity

    log_measure = dist.LogNormal(loc, scale)(value='x')
    probe = Variable('x', reals())**moment
    # Integrate E[x**moment] under the Monte Carlo interpretation.
    with monte_carlo_interpretation(particle=bint(num_samples)):
        with xfail_if_not_implemented():
            actual = Integrate(log_measure, probe, frozenset(['x']))

    # Empirical reference: average of backend samples raised to the moment.
    samples = backend_dist.LogNormal(loc, scale).sample((num_samples, ))
    expected = (samples**moment).mean(0)
    assert_close(actual.data, expected, atol=1e-2, rtol=1e-2)
def test_reduce_moment_matching_finite():
    """Moment-matching reduction should tolerate -inf mixture weights
    (degenerate components) without crashing."""
    delta = Delta('x', random_tensor(OrderedDict([('h', bint(7))])))
    discrete = random_tensor(OrderedDict(
        [('i', bint(6)), ('j', bint(5)), ('k', bint(3))]))
    gaussian = random_gaussian(OrderedDict(
        [('k', bint(3)), ('l', bint(2)), ('y', reals()), ('z', reals(2))]))

    # Zero out (in log space) all but the first slice along i and j.
    neg_inf = -float('inf')
    discrete.data[1:, :] = neg_inf
    discrete.data[:, 1:] = neg_inf

    joint = delta + discrete + gaussian
    with interpretation(moment_matching):
        joint.reduce(ops.logaddexp, frozenset({'j', 'k'}))
def test_subs_independent():
    """Substituting a batched funsor for the reals var of an Independent
    should agree with substituting into the underlying funsor and reducing."""
    f = Variable('x', reals(4, 5)) + random_tensor(OrderedDict(i=bint(3)))
    actual = Independent(f, 'x', 'i')
    assert 'i' not in actual.inputs

    y = Variable('y', reals(3, 4, 5))
    # Multiply by zero to introduce a fresh batch dim 'i' of size 7
    # without changing values.
    replacement = y + (0. * random_tensor(OrderedDict(i=bint(7))))
    actual = actual(x=replacement)
    assert actual.inputs['i'] == bint(7)

    expected = f(x=y['i']).reduce(ops.add, 'i')
    point = random_tensor(OrderedDict(i=bint(7)), y.output)
    assert_close(actual(y=point), expected(y=point))
def test_sequential_sum_product(impl, sum_op, prod_op, batch_inputs, state_domain, num_steps):
    """Check a sequential sum-product implementation against an explicit
    unrolled sum_product over per-step transition factors."""
    inputs = OrderedDict(batch_inputs)
    inputs.update(prev=state_domain, curr=state_domain)
    if num_steps is None:
        num_steps = 1  # a single step needs no explicit "time" input
    else:
        inputs["time"] = bint(num_steps)
    # Gaussian transitions for real state, tensor transitions otherwise.
    if state_domain.dtype == "real":
        trans = random_gaussian(inputs)
    else:
        trans = random_tensor(inputs)
    time = Variable("time", bint(num_steps))

    actual = impl(sum_op, prod_op, trans, time, {"prev": "curr"})
    expected_inputs = batch_inputs.copy()
    expected_inputs.update(prev=state_domain, curr=state_domain)
    assert dict(actual.inputs) == expected_inputs

    # Check against contract: unroll the chain as t_0 -> t_1 -> ... -> t_T
    # and sum out the interior states t_1 .. t_{T-1}.
    operands = tuple(
        trans(time=t, prev="t_{}".format(t), curr="t_{}".format(t + 1))
        for t in range(num_steps))
    reduce_vars = frozenset("t_{}".format(t) for t in range(1, num_steps))
    with interpretation(reflect):
        expected = sum_product(sum_op, prod_op, operands, reduce_vars)
    expected = apply_optimizer(expected)
    # Rename the chain endpoints back to the prev/curr interface.
    expected = expected(**{"t_0": "prev", "t_{}".format(num_steps): "curr"})
    expected = expected.align(tuple(actual.inputs.keys()))
    assert_close(actual, expected, rtol=5e-4 * num_steps)
def test_independent():
    """Independent(f, 'x', 'i') should fold the diag dim 'i' into a leading
    axis of 'x', matching explicit substitution plus sum-reduction."""
    f = Variable('x', reals(4, 5)) + random_tensor(OrderedDict(i=bint(3)))
    assert f.inputs['x'] == reals(4, 5)
    assert f.inputs['i'] == bint(3)

    actual = Independent(f, 'x', 'i')
    assert actual.inputs['x'] == reals(3, 4, 5)
    assert 'i' not in actual.inputs

    x = Variable('x', reals(3, 4, 5))
    expected = f(x=x['i']).reduce(ops.add, 'i')
    assert actual.inputs == expected.inputs
    assert actual.output == expected.output

    point = random_tensor(OrderedDict(), x.output)
    assert_close(actual(point), expected(point), atol=1e-5, rtol=1e-5)
def test_quote(output_shape, inputs):
    """funsor.quote should produce a string that evals back to an
    equivalent tensor (round-trip property)."""
    sizes = {'a': 4, 'b': 5, 'c': 6}
    inputs = OrderedDict((k, bint(sizes[k])) for k in inputs)
    x = random_tensor(inputs, reals(*output_shape))

    quoted = funsor.quote(x)
    assert isinstance(quoted, str)
    assert_close(eval(quoted), x)
def test_eager_contract_tensor_tensor(red_op, bin_op, x_inputs, x_shape, y_inputs, y_shape):
    """Eager Contraction of two tensors should equal bin_op followed by
    reduce, for every subset of reducible variables."""
    backend = get_backend()
    inputs = OrderedDict([("i", bint(4)), ("j", bint(5)), ("k", bint(6))])
    # Restrict to the requested subsets of batch inputs.
    x_inputs = OrderedDict((k, v) for k, v in inputs.items() if k in x_inputs)
    y_inputs = OrderedDict((k, v) for k, v in inputs.items() if k in y_inputs)
    x = random_tensor(x_inputs, reals(*x_shape))
    y = random_tensor(y_inputs, reals(*y_shape))
    xy = bin_op(x, y)

    # Exhaustively test every strict subset of the joint variables.
    all_vars = frozenset(x.inputs).union(y.inputs)
    for n in range(len(all_vars)):
        for reduced_vars in map(frozenset, itertools.combinations(all_vars, n)):
            print(f"reduced_vars = {reduced_vars}")
            expected = xy.reduce(red_op, reduced_vars)
            actual = Contraction(red_op, bin_op, reduced_vars, (x, y))
            assert_close(actual, expected,
                         atol=1e-4, rtol=5e-4 if backend == "jax" else 1e-4)
def test_subs_independent():
    """Substituting a batched funsor for the reals var of an Independent
    should agree with substituting into the underlying funsor and reducing."""
    f = Variable('x_i', Reals[4, 5]) + random_tensor(OrderedDict(i=Bint[3]))
    actual = Independent(f, 'x', 'i', 'x_i')
    assert 'i' not in actual.inputs
    assert 'x_i' not in actual.inputs

    y = Variable('y', Reals[3, 4, 5])
    # Multiply by zero to introduce a fresh batch dim 'i' of size 7
    # without changing values.
    replacement = y + (0. * random_tensor(OrderedDict(i=Bint[7])))
    actual = actual(x=replacement)
    assert actual.inputs['i'] == Bint[7]

    expected = f(x_i=y['i']).reduce(ops.add, 'i')
    point = random_tensor(OrderedDict(i=Bint[7]), y.output)
    assert_close(actual(y=point), expected(y=point))
def test_joint_shape(sample_inputs, int_event_inputs, real_event_inputs):
    """Sampling subsets of event vars from a tensor+gaussian joint should
    add the sample inputs; sampling nothing should be a no-op."""
    event_inputs = int_event_inputs + real_event_inputs
    discrete_inputs = OrderedDict(int_event_inputs)
    gaussian_inputs = OrderedDict(event_inputs)
    expected_inputs = OrderedDict(sample_inputs + event_inputs)
    sample_inputs = OrderedDict(sample_inputs)
    event_inputs = OrderedDict(event_inputs)
    t = random_tensor(discrete_inputs)
    g = random_gaussian(gaussian_inputs)
    x = t + g  # Joint(discrete=t, gaussian=g)

    # Try every combination of sampled event vars; collect NotImplementedError
    # into a single xfail at the end so all combinations are attempted.
    xfail = False
    for num_sampled in range(len(event_inputs)):
        for sampled_vars in itertools.combinations(list(event_inputs), num_sampled):
            sampled_vars = frozenset(sampled_vars)
            print('sampled_vars: {}'.format(', '.join(sampled_vars)))
            try:
                y = x.sample(sampled_vars, sample_inputs)
            except NotImplementedError:
                xfail = True
                continue
            if sampled_vars:
                assert dict(y.inputs) == dict(expected_inputs), sampled_vars
            else:
                # Sampling nothing must return the identical object.
                assert y is x
    if xfail:
        pytest.xfail(reason='Not implemented')
def test_partition(inputs, dims, expected_num_components):
    """_partition should split terms into connected components w.r.t. shared
    dims: a true partition, neither too coarse nor too fine."""
    sizes = dict(zip('abc', [2, 3, 4]))
    terms = [
        random_tensor(OrderedDict((s, bint(sizes[s])) for s in input_))
        for input_ in inputs
    ]
    components = list(_partition(terms, dims))

    # Check that result is a partition (compare by object identity).
    expected_terms = sorted(terms, key=id)
    actual_terms = sorted((x for c in components for x in c[0]), key=id)
    assert actual_terms == expected_terms
    assert dims == set.union(set(), *(c[1] for c in components))

    # Check that the partition is not too coarse.
    assert len(components) == expected_num_components

    # Check that partition is not too fine: any two terms sharing a dim
    # must land in the same component.
    component_dict = {x: i for i, (terms, _) in enumerate(components) for x in terms}
    for x in terms:
        for y in terms:
            if x is not y:
                if dims.intersection(x.inputs, y.inputs):
                    assert component_dict[x] == component_dict[y]
def test_tensor_shape(sample_inputs, batch_inputs, event_inputs):
    """Sampling subsets of event vars from a tensor should add sample inputs;
    sampling all event vars should yield a Delta/Contraction."""
    be_inputs = OrderedDict(batch_inputs + event_inputs)
    expected_inputs = OrderedDict(sample_inputs + batch_inputs + event_inputs)
    sample_inputs = OrderedDict(sample_inputs)
    batch_inputs = OrderedDict(batch_inputs)
    event_inputs = OrderedDict(event_inputs)
    x = random_tensor(be_inputs)

    # JAX needs an explicit PRNG key, split per draw; torch does not.
    rng_key = subkey = None if get_backend() == "torch" else np.array(
        [0, 0], dtype=np.uint32)
    for num_sampled in range(len(event_inputs) + 1):
        for sampled_vars in itertools.combinations(list(event_inputs), num_sampled):
            sampled_vars = frozenset(sampled_vars)
            print('sampled_vars: {}'.format(', '.join(sampled_vars)))
            if rng_key is not None:
                import jax
                rng_key, subkey = jax.random.split(rng_key)
            y = x.sample(sampled_vars, sample_inputs, rng_key=subkey)
            if num_sampled == len(event_inputs):
                assert isinstance(y, (Delta, Contraction))
            if sampled_vars:
                assert dict(y.inputs) == dict(expected_inputs), sampled_vars
            else:
                # Sampling nothing must return the identical object.
                assert y is x
def test_sarkka_bilmes_generic(time_input, global_inputs, local_inputs, num_periods):
    """Exercise the Sarkka-Bilmes algorithm on a generic transition factor.

    Local input names use a "P" prefix convention: "PPx" means variable "x"
    lagged by 2 steps. Names starting with "P" here are assumed to denote
    lags of an unprefixed base name — TODO confirm against the helper.
    """
    # For each unprefixed local var kk, find its maximum lag (longest "P" run
    # among names that reduce to kk after stripping "P"s).
    lags = {
        kk: reduce(max, [
            len(re.search("^P*", k).group(0))
            for k, v in local_inputs if k.strip("P") == kk
        ], 0)
        for kk, vv in local_inputs if not kk.startswith("P")
    }
    # Expected result carries every lagged copy of each base var, 0..max lag.
    expected_inputs = dict(global_inputs + tuple(
        set(((t * "P" + k), v)
            for k, v in local_inputs if not k.startswith("P")
            for t in range(0, lags[k] + 1))))
    trans_inputs = OrderedDict(global_inputs + (time_input, ) + local_inputs)
    global_vars = frozenset(k for k, v in global_inputs)
    # Gaussian transition if any input is real, discrete tensor otherwise.
    if any(v.dtype == "real" for v in trans_inputs.values()):
        trans = random_gaussian(trans_inputs)
    else:
        trans = random_tensor(trans_inputs)
    try:
        _check_sarkka_bilmes(trans, expected_inputs, global_vars, num_periods)
    except NotImplementedError as e:
        # Known-unimplemented cases become xfails; anything else is a failure.
        partial_reasons = ('TODO handle partial windows', )
        if any(reason in e.args[0] for reason in partial_reasons):
            pytest.xfail(reason=e.args[0])
        else:
            raise
def test_joint_shape(sample_inputs, int_event_inputs, real_event_inputs):
    """Sampling subsets of event vars from a tensor+gaussian joint should
    add the sample inputs (backend-aware rng_key handling)."""
    event_inputs = int_event_inputs + real_event_inputs
    discrete_inputs = OrderedDict(int_event_inputs)
    gaussian_inputs = OrderedDict(event_inputs)
    expected_inputs = OrderedDict(sample_inputs + event_inputs)
    sample_inputs = OrderedDict(sample_inputs)
    event_inputs = OrderedDict(event_inputs)
    t = random_tensor(discrete_inputs)
    g = random_gaussian(gaussian_inputs)
    x = t + g  # Joint(discrete=t, gaussian=g)

    # JAX needs an explicit PRNG key, split per draw; torch does not.
    rng_key = subkey = None if get_backend() == "torch" else np.array(
        [0, 0], dtype=np.uint32)
    # Try every combination of sampled event vars; collect NotImplementedError
    # into a single xfail at the end so all combinations are attempted.
    xfail = False
    for num_sampled in range(len(event_inputs)):
        for sampled_vars in itertools.combinations(list(event_inputs), num_sampled):
            sampled_vars = frozenset(sampled_vars)
            print('sampled_vars: {}'.format(', '.join(sampled_vars)))
            try:
                if rng_key is not None:
                    import jax
                    rng_key, subkey = jax.random.split(rng_key)
                y = x.sample(sampled_vars, sample_inputs, rng_key=subkey)
            except NotImplementedError:
                xfail = True
                continue
            if sampled_vars:
                assert dict(y.inputs) == dict(expected_inputs), sampled_vars
            else:
                # Sampling nothing must return the identical object.
                assert y is x
    if xfail:
        pytest.xfail(reason='Not implemented')
def test_reduce_moment_matching_shape(interp):
    """Reducing a delta+discrete+gaussian joint should drop exactly the
    reduced vars, and further reducing the real vars should commute."""
    delta = Delta('x', random_tensor(OrderedDict([('h', Bint[7])])))
    discrete = random_tensor(OrderedDict(
        [('h', Bint[7]), ('i', Bint[6]), ('j', Bint[5]), ('k', Bint[4])]))
    gaussian = random_gaussian(OrderedDict(
        [('k', Bint[4]), ('l', Bint[3]), ('m', Bint[2]),
         ('y', Real), ('z', Reals[2])]))

    reduced_vars = frozenset({'i', 'k', 'l'})
    real_vars = frozenset(
        name for name, d in gaussian.inputs.items() if d.dtype == "real")
    joint = delta + discrete + gaussian
    with interpretation(interp):
        actual = joint.reduce(ops.logaddexp, reduced_vars)

    # Only the reduced names disappear from the inputs.
    assert set(actual.inputs) == set(joint.inputs) - reduced_vars
    # Reducing real vars afterwards agrees with one combined reduction.
    assert_close(actual.reduce(ops.logaddexp, real_vars),
                 joint.reduce(ops.logaddexp, real_vars | reduced_vars))
def test_lognormal_distribution(moment):
    """Monte Carlo moments of a funsor LogNormal should match empirical
    moments of backend LogNormal samples."""
    num_samples = 100000
    inputs = OrderedDict(batch=Bint[10])
    loc = random_tensor(inputs)
    scale = random_tensor(inputs).exp()  # exp ensures positivity

    log_measure = dist.LogNormal(loc, scale)(value='x')
    probe = Variable('x', Real)**moment
    # Integrate E[x**moment] under the Monte Carlo interpretation.
    with interpretation(MonteCarlo(particle=Bint[num_samples])):
        with xfail_if_not_implemented():
            actual = Integrate(log_measure, probe, frozenset(['x']))

    # Empirical reference: align parameter data, then average backend samples.
    _, (loc_data, scale_data) = align_tensors(loc, scale)
    samples = backend_dist.LogNormal(loc_data, scale_data).sample((num_samples, ))
    expected = (samples**moment).mean(0)
    assert_close(actual.data, expected, atol=1e-2, rtol=1e-2)
def test_subs_lambda():
    """Substituting into a Lambda should distribute over bound var and body
    (with the bound var renamed to avoid capture)."""
    z = Variable('z', reals())
    i = Variable('i', bint(5))
    point = random_tensor(OrderedDict([('i', bint(5))]), reals())

    actual = Lambda(i, z)(z=point)
    # Equivalent: rename the bound var, substitute into the body directly.
    expected = Lambda(i(i='j'), z(z=point))
    check_funsor(actual, expected.inputs, expected.output)
    assert_close(actual, expected)