Example 1
# Pattern apparently from the funsor library: an eager rule for
# Integrate(Gaussian, Gaussian, ...). Assumes funsor internals in scope:
# ops, Gaussian, Tensor, align_gaussian, and the helpers _vv, _mv, _trace_mm.
from collections import OrderedDict


def eager_integrate(log_measure, integrand, reduced_vars):
    # Only real-valued inputs are integrated in closed form; any remaining
    # (discrete) reduced variables are summed out at the end.
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars:

        # Only the fully-overlapping case is handled: the measure's real inputs
        # are exactly the reduced variables and the integrand's are a subset.
        lhs_reals = frozenset(k for k, d in log_measure.inputs.items() if d.dtype == 'real')
        rhs_reals = frozenset(k for k, d in integrand.inputs.items() if d.dtype == 'real')
        if lhs_reals == real_vars and rhs_reals <= real_vars:
            inputs = OrderedDict((k, d) for t in (log_measure, integrand)
                                 for k, d in t.inputs.items())
            lhs_info_vec, lhs_precision = align_gaussian(inputs, log_measure)
            rhs_info_vec, rhs_precision = align_gaussian(inputs, integrand)
            lhs = Gaussian(lhs_info_vec, lhs_precision, inputs)

            # Compute the expectation of a non-normalized quadratic form.
            # See "The Matrix Cookbook" (November 15, 2012) ss. 8.2.2 eq. 380.
            # http://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf
            norm = ops.exp(lhs.log_normalizer.data)
            lhs_cov = ops.cholesky_inverse(lhs._precision_chol)
            lhs_loc = ops.cholesky_solve(ops.unsqueeze(lhs.info_vec, -1), lhs._precision_chol).squeeze(-1)
            vmv_term = _vv(lhs_loc, rhs_info_vec - 0.5 * _mv(rhs_precision, lhs_loc))
            data = norm * (vmv_term - 0.5 * _trace_mm(rhs_precision, lhs_cov))
            inputs = OrderedDict((k, d) for k, d in inputs.items() if k not in reduced_vars)
            result = Tensor(data, inputs)
            return result.reduce(ops.add, reduced_vars - real_vars)

        raise NotImplementedError('TODO implement partial integration')

    return None  # defer to default implementation
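
The closed form in Example 1 rests on the cited Matrix Cookbook identity
(sec. 8.2.2, eq. 380): for x ~ N(mu, Sigma), E[x' A x] = tr(A Sigma) + mu' A mu.
Below is a minimal NumPy sketch (illustrative values, independent of funsor)
that checks the identity against a Monte Carlo estimate:

import numpy as np

rng = np.random.default_rng(0)
mu = rng.normal(size=3)
A = rng.normal(size=(3, 3))
cov = np.array([[1.0, 0.2, 0.0],
                [0.2, 0.8, 0.1],
                [0.0, 0.1, 0.5]])

# Closed form: E[x' A x] = tr(A @ Sigma) + mu' A mu.
closed = np.trace(A @ cov) + mu @ A @ mu

# Monte Carlo estimate for comparison.
xs = rng.multivariate_normal(mu, cov, size=500_000)
mc = np.einsum('ni,ij,nj->n', xs, A, xs).mean()
print(closed, mc)  # the two values should agree to roughly two decimals
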
Example 2
# Eager rule for Integrate(Gaussian, Variable, ...): the integrand is a single
# real variable, so the integral is the mass-weighted mean of the Gaussian
# measure. Assumes funsor internals in scope: ops, Tensor.
from collections import OrderedDict


def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars == frozenset([integrand.name]):
        # The mean solves precision @ loc = info_vec; recover it with a Cholesky solve.
        loc = ops.cholesky_solve(ops.unsqueeze(log_measure.info_vec, -1), log_measure._precision_chol).squeeze(-1)
        # Scale by the total mass exp(log_normalizer) of the unnormalized measure.
        data = loc * ops.unsqueeze(ops.exp(log_measure.log_normalizer.data), -1)
        data = data.reshape(loc.shape[:-1] + integrand.output.shape)
        inputs = OrderedDict((k, d) for k, d in log_measure.inputs.items() if d.dtype != 'real')
        result = Tensor(data, inputs)
        return result.reduce(ops.add, reduced_vars - real_vars)
    return None  # defer to default implementation
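
Example 2 reads the mean out of the information-form parameterization: the
mean solves precision @ loc = info_vec, which the code computes with a
Cholesky solve, then scales by the measure's total mass exp(log_normalizer).
A standalone SciPy sketch of the solve step (illustrative values, not funsor
code):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

precision = np.array([[2.0, 0.3],
                      [0.3, 1.5]])
info_vec = np.array([0.5, -1.0])

# loc = precision^{-1} @ info_vec via a Cholesky factorization,
# mirroring ops.cholesky_solve above.
loc = cho_solve(cho_factor(precision, lower=True), info_vec)
assert np.allclose(precision @ loc, info_vec)
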
Example 3
# Converts a funsor Gaussian into a backend MultivariateNormal distribution.
# Assumes funsor internals in scope: ops, Tensor, to_data, get_backend, and
# BACKEND_TO_DISTRIBUTIONS_BACKEND.
from collections import OrderedDict
from importlib import import_module


def gaussian_to_data(funsor_dist, name_to_dim=None, normalized=False):
    if normalized:
        # Fold in the log normalizer so the converted density is normalized.
        return to_data(funsor_dist.log_normalizer + funsor_dist,
                       name_to_dim=name_to_dim)
    # Recover loc from information form: solve precision @ loc = info_vec.
    loc = ops.cholesky_solve(ops.unsqueeze(funsor_dist.info_vec, -1),
                             ops.cholesky(funsor_dist.precision)).squeeze(-1)
    int_inputs = OrderedDict(
        (k, d) for k, d in funsor_dist.inputs.items() if d.dtype != "real")
    loc = to_data(Tensor(loc, int_inputs), name_to_dim)
    precision = to_data(Tensor(funsor_dist.precision, int_inputs), name_to_dim)
    backend_dist = import_module(
        BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
    return backend_dist.MultivariateNormal.dist_class(
        loc, precision_matrix=precision)
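
For the PyTorch backend, the conversion in Example 3 amounts to recovering loc
from the information vector and handing loc and precision to a
precision-parameterized MultivariateNormal. A minimal sketch with concrete
tensors (bypassing funsor's to_data machinery):

import torch
from torch.distributions import MultivariateNormal

precision = torch.tensor([[2.0, 0.3],
                          [0.3, 1.5]])
info_vec = torch.tensor([0.5, -1.0])

# Same solve as in the code above: precision @ loc = info_vec.
chol = torch.linalg.cholesky(precision)
loc = torch.cholesky_solve(info_vec.unsqueeze(-1), chol).squeeze(-1)

dist = MultivariateNormal(loc, precision_matrix=precision)
print(dist.sample(), dist.log_prob(loc))
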
Example 4
# Moment-matching contraction: sums discrete variables out of a
# (discrete + gaussian) joint by collapsing the induced Gaussian mixture into
# a single Gaussian with matched mean and covariance. Assumes funsor internals
# in scope: ops, Tensor, Gaussian, Contraction.
import math
from collections import OrderedDict
from functools import reduce


def moment_matching_contract_joint(red_op, bin_op, reduced_vars, discrete,
                                   gaussian):

    # Discrete variables that also index the Gaussian are summed out
    # approximately by moment matching; all other variables reduce exactly.
    approx_vars = frozenset(
        k for k in reduced_vars
        if k in gaussian.inputs and gaussian.inputs[k].dtype != 'real')
    exact_vars = reduced_vars - approx_vars

    if exact_vars and approx_vars:
        return Contraction(red_op, bin_op, exact_vars, discrete,
                           gaussian).reduce(red_op, approx_vars)

    if approx_vars and not exact_vars:
        # Fold the Gaussian's log normalizer into the discrete log-weights.
        discrete += gaussian.log_normalizer
        new_discrete = discrete.reduce(
            ops.logaddexp, approx_vars.intersection(discrete.inputs))
        num_elements = reduce(ops.mul, [
            gaussian.inputs[k].num_elements
            for k in approx_vars.difference(discrete.inputs)
        ], 1)
        if num_elements != 1:
            new_discrete -= math.log(num_elements)

        int_inputs = OrderedDict(
            (k, d) for k, d in gaussian.inputs.items() if d.dtype != 'real')
        # Per-component mixture weights, normalized over the summed-out vars.
        probs = (discrete - new_discrete.clamp_finite()).exp()

        old_loc = Tensor(
            ops.cholesky_solve(ops.unsqueeze(gaussian.info_vec, -1),
                               gaussian._precision_chol).squeeze(-1),
            int_inputs)
        # Match the first moment: new_loc = sum_k probs_k * loc_k.
        new_loc = (probs * old_loc).reduce(ops.add, approx_vars)
        old_cov = Tensor(ops.cholesky_inverse(gaussian._precision_chol),
                         int_inputs)
        diff = old_loc - new_loc
        outers = Tensor(
            ops.unsqueeze(diff.data, -1) * ops.unsqueeze(diff.data, -2),
            diff.inputs)
        # Match the second moment (law of total variance):
        # new_cov = sum_k probs_k * (cov_k + diff_k diff_k^T).
        new_cov = ((probs * old_cov).reduce(ops.add, approx_vars) +
                   (probs * outers).reduce(ops.add, approx_vars))

        # Numerically stabilize by adding bogus precision to empty components.
        total = probs.reduce(ops.add, approx_vars)
        mask = ops.unsqueeze(ops.unsqueeze((total.data == 0), -1), -1)
        new_cov.data = new_cov.data + mask * ops.new_eye(
            new_cov.data, new_cov.data.shape[-1:])

        # Convert back to information form: precision = inv(new_cov),
        # info_vec = precision @ new_loc.
        new_precision = Tensor(
            ops.cholesky_inverse(ops.cholesky(new_cov.data)), new_cov.inputs)
        new_info_vec = (
            new_precision.data @ ops.unsqueeze(new_loc.data, -1)).squeeze(-1)
        new_inputs = new_loc.inputs.copy()
        new_inputs.update(
            (k, d) for k, d in gaussian.inputs.items() if d.dtype == 'real')
        new_gaussian = Gaussian(new_info_vec, new_precision.data, new_inputs)
        new_discrete -= new_gaussian.log_normalizer

        return new_discrete + new_gaussian

    return None  # defer to default implementation
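
The approximate branch of Example 4 is classic moment matching: the mixture of
Gaussians indexed by the summed-out discrete variables is collapsed into a
single Gaussian whose mean and covariance equal the mixture's. A
self-contained NumPy sketch of that reduction with illustrative weights,
means, and covariances:

import numpy as np

w = np.array([0.3, 0.7])                    # mixture weights (probs)
mus = np.array([[0.0, 0.0],
                [2.0, 1.0]])                # component means (old_loc)
covs = np.stack([0.5 * np.eye(2),
                 1.5 * np.eye(2)])          # component covariances (old_cov)

# First moment: new_loc = sum_k w_k * mu_k.
new_mu = np.einsum('k,ki->i', w, mus)

# Second moment: new_cov = sum_k w_k * (cov_k + outer(mu_k - new_mu)).
diff = mus - new_mu
new_cov = np.einsum('k,kij->ij', w,
                    covs + np.einsum('ki,kj->kij', diff, diff))

# As in the funsor code, one could then convert back to information form:
precision = np.linalg.inv(new_cov)
info_vec = precision @ new_mu
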