def test_reduce_subset(dims, reduced_vars, op):
    reduced_vars = frozenset(reduced_vars)
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = torch.rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = data.byte()
        dtype = 2
    x = Tensor(data, inputs, dtype)
    actual = x.reduce(op, reduced_vars)
    expected_inputs = OrderedDict(
        (d, bint(sizes[d])) for d in dims if d not in reduced_vars)

    reduced_vars &= frozenset(dims)
    if not reduced_vars:
        assert actual is x
    else:
        if reduced_vars == frozenset(dims):
            if op is ops.logaddexp:
                # work around missing torch.Tensor.logsumexp()
                data = data.reshape(-1).logsumexp(0)
            else:
                data = REDUCE_OP_TO_TORCH[op](data)
        else:
            for pos in reversed(sorted(map(dims.index, reduced_vars))):
                data = REDUCE_OP_TO_TORCH[op](data, pos)
                if op in (ops.min, ops.max):
                    data = data[0]
        check_funsor(actual, expected_inputs, Domain((), dtype))
        assert_close(actual, Tensor(data, expected_inputs, dtype),
                     atol=1e-5, rtol=1e-5)
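# Illustrative sketch (not from the original suite) of the behavior checked
# above, assuming the same Tensor/bint/ops API as this module: reducing a
# proper subset of inputs eliminates only those dimensions, and reducing over
# names that are not inputs is a no-op, mirroring the `actual is x` branch.
def example_reduce_subset():
    from collections import OrderedDict

    import torch

    import funsor.ops as ops
    from funsor.domains import bint
    from funsor.torch import Tensor

    x = Tensor(torch.rand(3, 4), OrderedDict(a=bint(3), b=bint(4)))
    y = x.reduce(ops.add, frozenset(['a']))  # sum out 'a', keep 'b'
    assert set(y.inputs) == {'b'}
    assert x.reduce(ops.add, frozenset(['z'])) is x  # 'z' is not an input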
def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars:
        lhs_reals = frozenset(k for k, d in log_measure.inputs.items() if d.dtype == 'real')
        rhs_reals = frozenset(k for k, d in integrand.inputs.items() if d.dtype == 'real')
        if lhs_reals == real_vars and rhs_reals <= real_vars:
            inputs = OrderedDict((k, d) for t in (log_measure, integrand)
                                 for k, d in t.inputs.items())
            lhs_info_vec, lhs_precision = align_gaussian(inputs, log_measure)
            rhs_info_vec, rhs_precision = align_gaussian(inputs, integrand)
            lhs = Gaussian(lhs_info_vec, lhs_precision, inputs)

            # Compute the expectation of a non-normalized quadratic form.
            # See "The Matrix Cookbook" (November 15, 2012) ss. 8.2.2 eq. 380.
            # http://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf
            norm = lhs.log_normalizer.data.exp()
            lhs_cov = cholesky_inverse(lhs._precision_chol)
            lhs_loc = lhs.info_vec.unsqueeze(-1).cholesky_solve(
                lhs._precision_chol).squeeze(-1)
            vmv_term = _vv(lhs_loc, rhs_info_vec - 0.5 * _mv(rhs_precision, lhs_loc))
            data = norm * (vmv_term - 0.5 * _trace_mm(rhs_precision, lhs_cov))

            inputs = OrderedDict(
                (k, d) for k, d in inputs.items() if k not in reduced_vars)
            result = Tensor(data, inputs)
            return result.reduce(ops.add, reduced_vars - real_vars)

        raise NotImplementedError('TODO implement partial integration')

    return None  # defer to default implementation
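# For reference, the identity applied above: if the measure exp(g(x)) has mean
# mu, covariance Sigma, and total mass Z = exp(g.log_normalizer), and the
# integrand is the information-form quadratic  b'x - (1/2) x'A x  with
# b = rhs_info_vec and A = rhs_precision, then (Matrix Cookbook eq. 380)
#
#     integral = Z * (b'mu - (1/2) mu'A mu - (1/2) tr(A Sigma))
#
# which the code assembles as norm * (vmv_term - 0.5 * _trace_mm(...)).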
def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars:
        lhs_reals = frozenset(k for k, d in log_measure.inputs.items() if d.dtype == 'real')
        rhs_reals = frozenset(k for k, d in integrand.inputs.items() if d.dtype == 'real')
        if lhs_reals == real_vars and rhs_reals <= real_vars:
            inputs = OrderedDict((k, d) for t in (log_measure, integrand)
                                 for k, d in t.inputs.items())
            lhs_loc, lhs_precision = align_gaussian(inputs, log_measure)
            rhs_loc, rhs_precision = align_gaussian(inputs, integrand)

            # Compute the expectation of a non-normalized quadratic form.
            # See "The Matrix Cookbook" (November 15, 2012) ss. 8.2.2 eq. 380.
            # http://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf
            lhs_scale_tri = torch.inverse(
                torch.cholesky(lhs_precision)).transpose(-1, -2)
            lhs_covariance = torch.matmul(lhs_scale_tri,
                                          lhs_scale_tri.transpose(-1, -2))
            dim = lhs_loc.size(-1)
            norm = _det_tri(lhs_scale_tri) * (2 * math.pi) ** (0.5 * dim)
            data = -0.5 * norm * (_vmv(rhs_precision, lhs_loc - rhs_loc) +
                                  _trace_mm(rhs_precision, lhs_covariance))

            inputs = OrderedDict(
                (k, d) for k, d in inputs.items() if k not in reduced_vars)
            result = Tensor(data, inputs)
            return result.reduce(ops.add, reduced_vars - real_vars)

        raise NotImplementedError('TODO implement partial integration')

    return None  # defer to default implementation
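# This older variant parameterizes Gaussians by (loc, precision) instead of
# (info_vec, precision), so the same expectation identity takes the centered
# form, with mu = lhs_loc, Sigma = lhs_covariance, A = rhs_precision, and
# total mass Z = _det_tri(lhs_scale_tri) * (2 * pi) ** (0.5 * dim):
#
#     integral = -(1/2) * Z * ((mu - rhs_loc)'A(mu - rhs_loc) + tr(A Sigma))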
def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars == frozenset([integrand.name]):
        # The result is the unnormalized mean: E[x] scaled by the measure's
        # total mass exp(log_normalizer).
        loc = log_measure.info_vec.unsqueeze(-1).cholesky_solve(
            log_measure._precision_chol).squeeze(-1)
        data = loc * log_measure.log_normalizer.data.exp().unsqueeze(-1)
        data = data.reshape(loc.shape[:-1] + integrand.output.shape)
        inputs = OrderedDict(
            (k, d) for k, d in log_measure.inputs.items() if d.dtype != 'real')
        result = Tensor(data, inputs)
        return result.reduce(ops.add, reduced_vars - real_vars)

    return None  # defer to default implementation
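# Hedged usage sketch (not from the original source): the names Gaussian,
# Integrate, Variable, and reals below are funsor's info_vec-era API, and the
# positional constructor Gaussian(info_vec, precision, inputs) is assumed.
# Integrating a Gaussian measure against a Variable integrand dispatches to
# the handler above and returns the unnormalized mean E[x] * total_mass.
def example_gaussian_mean():
    from collections import OrderedDict

    import torch

    from funsor.domains import reals
    from funsor.gaussian import Gaussian
    from funsor.integrate import Integrate
    from funsor.terms import Variable

    g = Gaussian(torch.tensor([1.0, 2.0]), torch.eye(2),
                 OrderedDict(x=reals(2)))
    mean = Integrate(g, Variable('x', reals(2)), frozenset(['x']))
    # With identity precision, loc == info_vec, so the data is [1., 2.]
    # scaled by the measure's total mass exp(log_normalizer).
    return mean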
def test_reduce_all(dims, op):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = torch.rand(shape) + 0.5
    if op in [ops.and_, ops.or_]:
        data = data.byte()
    if op is ops.logaddexp:
        # work around missing torch.Tensor.logsumexp()
        expected_data = data.reshape(-1).logsumexp(0)
    else:
        expected_data = REDUCE_OP_TO_TORCH[op](data)

    x = Tensor(data, inputs)
    actual = x.reduce(op)
    check_funsor(actual, {}, reals(), expected_data)
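# Companion sketch (not from the original suite) of the full reduction the
# test above checks, assuming the same Tensor/bint/ops API: reduce with no
# reduced_vars argument reduces over all inputs, yielding a scalar funsor.
def example_reduce_all():
    from collections import OrderedDict

    import torch

    import funsor.ops as ops
    from funsor.domains import bint
    from funsor.torch import Tensor

    x = Tensor(torch.rand(3, 4), OrderedDict(a=bint(3), b=bint(4)))
    total = x.reduce(ops.add)  # no reduced_vars: reduce over all inputs
    assert not total.inputs    # scalar result
    assert torch.allclose(total.data, x.data.sum())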