Example #1
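A parametrized test of Tensor.reduce over a subset of named inputs: it builds a random tensor, reduces some variables with op, and checks the result against the corresponding backend reduction, including the no-op case where none of the requested variables are actual inputs.
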
def test_reduce_subset(dims, reduced_vars, op):
    """Check Tensor.reduce over a subset of inputs against the raw backend op."""
    reduced_vars = frozenset(reduced_vars)
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
        dtype = 2
    x = Tensor(data, inputs, dtype)
    actual = x.reduce(op, reduced_vars)
    expected_inputs = OrderedDict(
        (d, bint(sizes[d])) for d in dims if d not in reduced_vars)

    # Reducing over names that are not actual inputs is a no-op.
    reduced_vars &= frozenset(dims)
    if not reduced_vars:
        assert actual is x
    else:
        if reduced_vars == frozenset(dims):
            data = REDUCE_OP_TO_NUMERIC[op](data, None)
        else:
            for pos in reversed(sorted(map(dims.index, reduced_vars))):
                data = REDUCE_OP_TO_NUMERIC[op](data, pos)
        check_funsor(actual, expected_inputs, Domain((), dtype))
        assert_close(actual,
                     Tensor(data, expected_inputs, dtype),
                     atol=1e-5,
                     rtol=1e-5)
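
For context, tests like this are usually driven by pytest parametrization. A minimal sketch, assuming pytest and the REDUCE_OP_TO_NUMERIC mapping used above; the parameter grids are illustrative, not taken from the snippet:

import itertools
import pytest

@pytest.mark.parametrize('op', list(REDUCE_OP_TO_NUMERIC.keys()))
@pytest.mark.parametrize('dims,reduced_vars', [
    (dims, reduced_vars)
    for dims in [('a',), ('a', 'b'), ('b', 'a', 'c')]
    for num_reduced in range(len(dims) + 2)
    for reduced_vars in itertools.combinations('abcd', num_reduced)
])
def test_reduce_subset(dims, reduced_vars, op):
    ...  # body as above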
Example #2
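An eager rule for Integrate when both the log measure and the integrand are Gaussian: the expectation of a non-normalized quadratic form has a closed form, and any remaining discrete variables are summed out afterwards; partial integration is left unimplemented.
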
def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars:

        # A closed form exists only when integrating out exactly the measure's
        # real variables, with the integrand's real variables a subset of them.
        lhs_reals = frozenset(k for k, d in log_measure.inputs.items() if d.dtype == 'real')
        rhs_reals = frozenset(k for k, d in integrand.inputs.items() if d.dtype == 'real')
        if lhs_reals == real_vars and rhs_reals <= real_vars:
            inputs = OrderedDict((k, d) for t in (log_measure, integrand)
                                 for k, d in t.inputs.items())
            lhs_info_vec, lhs_precision = align_gaussian(inputs, log_measure)
            rhs_info_vec, rhs_precision = align_gaussian(inputs, integrand)
            lhs = Gaussian(lhs_info_vec, lhs_precision, inputs)

            # Compute the expectation of a non-normalized quadratic form.
            # See "The Matrix Cookbook" (November 15, 2012) ss. 8.2.2 eq. 380.
            # http://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf
            norm = ops.exp(lhs.log_normalizer.data)
            lhs_cov = ops.cholesky_inverse(lhs._precision_chol)
            lhs_loc = ops.cholesky_solve(ops.unsqueeze(lhs.info_vec, -1), lhs._precision_chol).squeeze(-1)
            vmv_term = _vv(lhs_loc, rhs_info_vec - 0.5 * _mv(rhs_precision, lhs_loc))
            data = norm * (vmv_term - 0.5 * _trace_mm(rhs_precision, lhs_cov))
            inputs = OrderedDict((k, d) for k, d in inputs.items() if k not in reduced_vars)
            result = Tensor(data, inputs)
            return result.reduce(ops.add, reduced_vars - real_vars)

        raise NotImplementedError('TODO implement partial integration')

    return None  # defer to default implementation
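
In funsor, a rule like this is attached to Integrate via multiple dispatch on the argument types. A sketch of the registration, assuming the usual eager.register pattern (exact import paths vary across funsor versions):

from funsor.gaussian import Gaussian
from funsor.integrate import Integrate
from funsor.terms import eager

@eager.register(Integrate, Gaussian, Gaussian, frozenset)
def eager_integrate(log_measure, integrand, reduced_vars):
    ...  # body as in the example above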
Example #3
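A companion eager rule for Integrate: when the integrand is exactly the real variable being integrated out, the result is the mean of the unnormalized Gaussian, i.e. precision**-1 @ info_vec scaled by the total mass exp(log_normalizer).
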
def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars == frozenset([integrand.name]):
        # Mean of the unnormalized Gaussian: solve precision @ loc = info_vec
        # via the Cholesky factor, then scale by the mass exp(log_normalizer).
        loc = ops.cholesky_solve(ops.unsqueeze(log_measure.info_vec, -1), log_measure._precision_chol).squeeze(-1)
        data = loc * ops.unsqueeze(ops.exp(log_measure.log_normalizer.data), -1)
        data = data.reshape(loc.shape[:-1] + integrand.output.shape)
        inputs = OrderedDict((k, d) for k, d in log_measure.inputs.items() if d.dtype != 'real')
        result = Tensor(data, inputs)
        return result.reduce(ops.add, reduced_vars - real_vars)
    return None  # defer to default implementation
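
The matching dispatch here would key on a Variable integrand; a sketch under the same assumptions as above:

from funsor.terms import Variable, eager

@eager.register(Integrate, Gaussian, Variable, frozenset)
def eager_integrate(log_measure, integrand, reduced_vars):
    ...  # body as in the example above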
Example #4
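A test that reducing a Tensor over all of its inputs collapses it to a scalar funsor whose data equals the full numeric reduction.
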
def test_reduce_all(dims, op):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = rand(shape) + 0.5
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
    expected_data = REDUCE_OP_TO_NUMERIC[op](data, None)

    x = Tensor(data, inputs)
    actual = x.reduce(op)
    check_funsor(actual, {}, reals(), expected_data)
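
Concretely, the property under test looks like this; a minimal sketch assuming the NumPy backend (shapes and names are ours):

import numpy as np
from collections import OrderedDict

data = np.random.rand(3, 4) + 0.5
x = Tensor(data, OrderedDict([('a', bint(3)), ('b', bint(4))]))
assert np.allclose(x.reduce(ops.add).data, data.sum())  # all inputs summed out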
Example #5
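A helper that samples a funsor distribution and returns the gap between a Monte Carlo estimate of a statistic (mean, variance, or entropy) and the closed-form value from the backend distribution, both summed to a scalar and elementwise.
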
def _get_stat_diff(funsor_dist_class, sample_inputs, inputs, num_samples,
                   statistic, with_lazy, params):
    """Return the gap between a Monte Carlo estimate of ``statistic`` and its
    closed-form value under the backend distribution."""
    params = [Tensor(p, inputs) for p in params]
    if isinstance(with_lazy, bool):
        with interpretation(lazy if with_lazy else eager):
            funsor_dist = funsor_dist_class(*params)
    else:
        funsor_dist = funsor_dist_class(*params)

    # JAX needs an explicit PRNG key; the torch backend manages RNG state globally.
    rng_key = None if get_backend() == "torch" else np.array([0, 0], dtype=np.uint32)
    sample_value = funsor_dist.sample(frozenset(['value']),
                                      sample_inputs,
                                      rng_key=rng_key)
    expected_inputs = OrderedDict(
        tuple(sample_inputs.items()) + tuple(inputs.items()) +
        (('value', funsor_dist.inputs['value']), ))
    check_funsor(sample_value, expected_inputs, reals())

    if sample_inputs:

        actual_mean = Integrate(
            sample_value,
            Variable('value', funsor_dist.inputs['value']),
            frozenset(['value']),
        ).reduce(ops.add, frozenset(sample_inputs))

        inputs, tensors = align_tensors(
            *list(funsor_dist.params.values())[:-1])
        raw_dist = funsor_dist.dist_class(
            **dict(zip(funsor_dist._ast_fields[:-1], tensors)))
        expected_mean = Tensor(raw_dist.mean, inputs)

        if statistic == "mean":
            actual_stat, expected_stat = actual_mean, expected_mean
        elif statistic == "variance":
            actual_stat = Integrate(
                sample_value,
                (Variable('value', funsor_dist.inputs['value']) - actual_mean) ** 2,
                frozenset(['value']),
            ).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.variance, inputs)
        elif statistic == "entropy":
            actual_stat = -Integrate(
                sample_value,
                funsor_dist,
                frozenset(['value']),
            ).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.entropy(), inputs)
        else:
            raise ValueError("invalid test statistic")

        diff = actual_stat.reduce(ops.add).data - expected_stat.reduce(ops.add).data
        return diff.sum(), diff
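
A hedged usage sketch; the distribution class and the loc/scale parameter arrays are stand-ins, not fixtures from this helper:

# diff_sum, diff = _get_stat_diff(dist.Normal, sample_inputs, inputs,
#                                 num_samples=200000, statistic="variance",
#                                 with_lazy=False, params=(loc, scale))
# np.testing.assert_allclose(diff, 0., atol=1e-2)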
Example #6
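A PyTorch-specific variant of the same check: it draws samples, compares the Monte Carlo mean to the true mean with assert_close, and, unless skip_grad is set, also compares gradients of the chosen statistic with respect to the distribution parameters.
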
def _check_sample(funsor_dist,
                  sample_inputs,
                  inputs,
                  atol=1e-2,
                  rtol=None,
                  num_samples=100000,
                  statistic="mean",
                  skip_grad=False):
    """utility that compares a Monte Carlo estimate of a distribution mean with the true mean"""
    samples_per_dim = int(num_samples**(1. / max(1, len(sample_inputs))))
    sample_inputs = OrderedDict(
        (k, bint(samples_per_dim)) for k in sample_inputs)

    # Enable gradient tracking on every parameter except the trailing 'value'.
    for tensor in list(funsor_dist.params.values())[:-1]:
        tensor.data.requires_grad_()

    sample_value = funsor_dist.sample(frozenset(['value']), sample_inputs)
    expected_inputs = OrderedDict(
        tuple(sample_inputs.items()) + tuple(inputs.items()) +
        (('value', funsor_dist.inputs['value']), ))
    check_funsor(sample_value, expected_inputs, reals())

    if sample_inputs:

        actual_mean = Integrate(
            sample_value,
            Variable('value', funsor_dist.inputs['value']),
            frozenset(['value']),
        ).reduce(ops.add, frozenset(sample_inputs))

        inputs, tensors = align_tensors(
            *list(funsor_dist.params.values())[:-1])
        raw_dist = funsor_dist.dist_class(
            **dict(zip(funsor_dist._ast_fields[:-1], tensors)))
        expected_mean = Tensor(raw_dist.mean, inputs)

        check_funsor(actual_mean, expected_mean.inputs, expected_mean.output)
        assert_close(actual_mean, expected_mean, atol=atol, rtol=rtol)

    if sample_inputs and not skip_grad:
        if statistic == "mean":
            actual_stat, expected_stat = actual_mean, expected_mean
        elif statistic == "variance":
            actual_stat = Integrate(
                sample_value,
                (Variable('value', funsor_dist.inputs['value']) - actual_mean) ** 2,
                frozenset(['value']),
            ).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.variance, inputs)
        elif statistic == "entropy":
            actual_stat = -Integrate(
                sample_value,
                funsor_dist,
                frozenset(['value']),
            ).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.entropy(), inputs)
        else:
            raise ValueError("invalid test statistic")

        grad_targets = [v.data for v in list(funsor_dist.params.values())[:-1]]
        actual_grads = torch.autograd.grad(
            actual_stat.reduce(ops.add).sum().data, grad_targets, allow_unused=True)
        expected_grads = torch.autograd.grad(
            expected_stat.reduce(ops.add).sum().data, grad_targets, allow_unused=True)

        assert_close(actual_stat, expected_stat, atol=atol, rtol=rtol)

        for actual_grad, expected_grad in zip(actual_grads, expected_grads):
            if expected_grad is not None:
                assert_close(actual_grad, expected_grad, atol=atol, rtol=rtol)
            else:
                assert_close(actual_grad,
                             torch.zeros_like(actual_grad),
                             atol=atol,
                             rtol=rtol)
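
One detail worth noting is how the sample budget is split: samples_per_dim takes the len(sample_inputs)-th root of num_samples, so the number of joint samples stays near the budget. For example:

# With num_samples=100000 and two sample dimensions:
int(100000 ** (1. / 2))  # -> 316 per dimension; 316**2 == 99856 joint samples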