def test_reduce_moment_matching_multivariate():
    int_inputs = [('i', bint(4))]
    real_inputs = [('x', reals(2))]
    inputs = OrderedDict(int_inputs + real_inputs)
    int_inputs = OrderedDict(int_inputs)
    real_inputs = OrderedDict(real_inputs)

    loc = numeric_array([[-10., -1.], [+10., -1.], [+10., +1.], [-10., +1.]])
    precision = zeros(4, 1, 1) + ops.new_eye(loc, (2,))
    discrete = Tensor(zeros(4), int_inputs)
    gaussian = Gaussian(loc, precision, inputs)
    gaussian -= gaussian.log_normalizer
    joint = discrete + gaussian
    with interpretation(moment_matching):
        actual = joint.reduce(ops.logaddexp, 'i')
    assert_close(actual.reduce(ops.logaddexp), joint.reduce(ops.logaddexp))

    expected_loc = zeros(2)
    expected_covariance = numeric_array([[101., 0.], [0., 2.]])
    expected_precision = _inverse(expected_covariance)
    expected_gaussian = Gaussian(expected_loc, expected_precision, real_inputs)
    expected_gaussian -= expected_gaussian.log_normalizer
    expected_discrete = Tensor(ops.log(numeric_array(4.)))
    expected = expected_discrete + expected_gaussian
    assert_close(actual, expected, atol=1e-5, rtol=None)


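# Where the expected covariance [[101., 0.], [0., 2.]] above comes from: under
# moment matching, the four equally weighted components with means
# (-10, -1), (+10, -1), (+10, +1), (-10, +1) and identity covariance are
# collapsed into a single Gaussian whose mean is the mean of the component
# means (the origin) and whose covariance is the within-component covariance
# plus the covariance of the component means:
#     eye(2) + diag([100., 1.]) = [[101., 0.], [0., 2.]]
# Each component carries log-weight 0, so the total unnormalized mass is 4,
# which is why expected_discrete is log(4).
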
def test_eager_subs_origin(int_inputs, real_inputs):
    int_inputs = OrderedDict(sorted(int_inputs.items()))
    real_inputs = OrderedDict(sorted(real_inputs.items()))
    inputs = int_inputs.copy()
    inputs.update(real_inputs)
    g = random_gaussian(inputs)

    # Check that Gaussian log density at origin is zero.
    origin = {k: zeros(d.shape) for k, d in real_inputs.items()}
    actual = g(**origin)
    expected_data = zeros(tuple(d.size for d in int_inputs.values()))
    expected = Tensor(expected_data, int_inputs)
    assert_close(actual, expected)


def test_block_vector_batched(batch_shape):
    shape = batch_shape + (10,)
    expected = zeros(shape)
    actual = BlockVector(shape)

    expected[..., 1] = randn(batch_shape)
    actual[..., 1] = expected[..., 1]
    expected[..., 3:5] = randn(batch_shape + (2,))
    actual[..., 3:5] = expected[..., 3:5]

    assert_close(actual.as_tensor(), expected)


def test_block_vector():
    shape = (10,)
    expected = zeros(shape)
    actual = BlockVector(shape)

    expected[1] = randn(())
    actual[1] = expected[1]
    expected[3:5] = randn((2,))
    actual[3:5] = expected[3:5]

    assert_close(actual.as_tensor(), expected)


def test_block_matrix(sparse):
    shape = (10, 10)
    expected = zeros(shape)
    actual = BlockMatrix(shape)

    expected[1, 1] = randn(())
    actual[1, 1] = expected[1, 1]

    if not sparse:
        expected[1, 3:5] = randn((2,))
        actual[1, 3:5] = expected[1, 3:5]

        expected[3:5, 1] = randn((2,))
        actual[3:5, 1] = expected[3:5, 1]

    expected[3:5, 3:5] = randn((2, 2))
    actual[3:5, 3:5] = expected[3:5, 3:5]

    assert_close(actual.as_tensor(), expected)


def test_block_matrix_batched(batch_shape, sparse):
    shape = batch_shape + (10, 10)
    expected = zeros(shape)
    actual = BlockMatrix(shape)

    expected[..., 1, 1] = randn(batch_shape)
    actual[..., 1, 1] = expected[..., 1, 1]

    if not sparse:
        expected[..., 1, 3:5] = randn(batch_shape + (2,))
        actual[..., 1, 3:5] = expected[..., 1, 3:5]

        expected[..., 3:5, 1] = randn(batch_shape + (2,))
        actual[..., 3:5, 1] = expected[..., 3:5, 1]

    expected[..., 3:5, 3:5] = randn(batch_shape + (2, 2))
    actual[..., 3:5, 3:5] = expected[..., 3:5, 3:5]

    assert_close(actual.as_tensor(), expected)


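# The four tests above exercise BlockVector/BlockMatrix: assignments to index
# or slice positions are accumulated, and .as_tensor() materializes a dense
# tensor in which every unassigned position is zero; the `sparse` cases skip
# the off-diagonal block assignments. A minimal usage sketch (not a test; it
# assumes the same BlockMatrix and randn helpers imported above) assembles a
# block-diagonal matrix like this:
#
#     p = BlockMatrix((4, 4))
#     p[0:2, 0:2] = randn((2, 2))   # upper-left block
#     p[2:4, 2:4] = randn((2, 2))   # lower-right block
#     dense = p.as_tensor()         # off-diagonal blocks remain zero
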
def test_to_data_error():
    data = zeros((3, 3))
    x = Tensor(data, OrderedDict(i=bint(3)))
    with pytest.raises(ValueError):
        funsor.to_data(x)


def test_to_data():
    data = zeros((3, 3))
    x = Tensor(data)
    assert funsor.to_data(x) is data
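

# Taken together, the two tests above pin down the to_data behavior exercised
# here: funsor.to_data returns the backing array itself (the identical object,
# not a copy) when the Tensor has no free inputs, and raises ValueError when a
# free input such as `i` remains, since the named input cannot be mapped to a
# positional dim in this bare call.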