def random_gaussian(inputs):
    """
    Creates a random :class:`funsor.gaussian.Gaussian` with given inputs.
    """
    assert isinstance(inputs, OrderedDict)
    batch_shape = tuple(d.dtype for d in inputs.values() if d.dtype != 'real')
    event_shape = (sum(d.num_elements for d in inputs.values() if d.dtype == 'real'),)
    # Build a random positive definite precision matrix; the 0.5 * eye jitter
    # keeps it well-conditioned even if prec_sqrt is nearly singular.
    prec_sqrt = randn(batch_shape + event_shape + event_shape)
    precision = ops.matmul(prec_sqrt, ops.transpose(prec_sqrt, -1, -2))
    precision = precision + 0.5 * ops.new_eye(precision, event_shape[:1])
    # Convert a random location to information form: info_vec = precision @ loc.
    loc = randn(batch_shape + event_shape)
    info_vec = ops.matmul(precision, ops.unsqueeze(loc, -1)).squeeze(-1)
    return Gaussian(info_vec, precision, inputs)
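A minimal usage sketch (not part of the original module), assuming funsor's ``Bint``/``Real``/``Reals`` domains and ``funsor.set_backend``; older funsor versions spell the domains ``bint(3)``/``reals(2)`` instead:

from collections import OrderedDict

import funsor
from funsor.domains import Bint, Real, Reals

funsor.set_backend("torch")  # any backend providing randn

# One integer (batch) input of size 3 plus real inputs of sizes 1 and 2,
# giving batch_shape == (3,) and event_shape == (3,).
inputs = OrderedDict(i=Bint[3], x=Real, y=Reals[2])
g = random_gaussian(inputs)
assert g.info_vec.shape == (3, 3)      # batch_shape + event_shape
assert g.precision.shape == (3, 3, 3)  # batch_shape + event_shape * 2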
def eager_reduce(self, op, reduced_vars):
    if op is ops.logaddexp:
        # Marginalize out real variables, but keep mixtures lazy.
        assert all(v in self.inputs for v in reduced_vars)
        real_vars = frozenset(k for k, d in self.inputs.items() if d.dtype == "real")
        reduced_reals = reduced_vars & real_vars
        reduced_ints = reduced_vars - real_vars
        if not reduced_reals:
            return None  # defer to default implementation

        inputs = OrderedDict((k, d) for k, d in self.inputs.items()
                             if k not in reduced_reals)
        if reduced_reals == real_vars:
            # All real variables are marginalized out; only the normalizer remains.
            result = self.log_normalizer
        else:
            int_inputs = OrderedDict((k, v) for k, v in inputs.items()
                                     if v.dtype != 'real')
            offsets, _ = _compute_offsets(self.inputs)
            # Partition event coordinates into kept (a) and marginalized (b) blocks.
            a = []
            b = []
            for key, domain in self.inputs.items():
                if domain.dtype == 'real':
                    block = ops.new_arange(self.info_vec, offsets[key],
                                           offsets[key] + domain.num_elements, 1)
                    (b if key in reduced_vars else a).append(block)
            a = ops.cat(-1, *a)
            b = ops.cat(-1, *b)
            prec_aa = self.precision[..., a[..., None], a]
            prec_ba = self.precision[..., b[..., None], a]
            prec_bb = self.precision[..., b[..., None], b]
            # Schur complement prec_aa - prec_ab @ inv(prec_bb) @ prec_ba,
            # computed stably via the Cholesky factor of prec_bb.
            prec_b = ops.cholesky(prec_bb)
            prec_a = ops.triangular_solve(prec_ba, prec_b)
            prec_at = ops.transpose(prec_a, -1, -2)
            precision = prec_aa - ops.matmul(prec_at, prec_a)

            info_a = self.info_vec[..., a]
            info_b = self.info_vec[..., b]
            b_tmp = ops.triangular_solve(info_b[..., None], prec_b)
            info_vec = info_a - ops.matmul(prec_at, b_tmp)[..., 0]

            # Normalization constant picked up by integrating out the b block.
            log_prob = Tensor(0.5 * len(b) * math.log(2 * math.pi)
                              - _log_det_tri(prec_b)
                              + 0.5 * (b_tmp[..., 0] ** 2).sum(-1),
                              int_inputs)
            result = log_prob + Gaussian(info_vec, precision, inputs)

        return result.reduce(ops.logaddexp, reduced_ints)

    elif op is ops.add:
        for v in reduced_vars:
            if self.inputs[v].dtype == 'real':
                raise ValueError("Cannot sum along a real dimension: {}".format(repr(v)))

        # Fuse Gaussians along a plate. Compare to eager_add_gaussian_gaussian().
        old_ints = OrderedDict((k, v) for k, v in self.inputs.items()
                               if v.dtype != 'real')
        new_ints = OrderedDict((k, v) for k, v in old_ints.items()
                               if k not in reduced_vars)
        inputs = OrderedDict((k, v) for k, v in self.inputs.items()
                             if k not in reduced_vars)

        # Log densities add, so information vectors and precisions add.
        info_vec = Tensor(self.info_vec, old_ints).reduce(ops.add, reduced_vars)
        precision = Tensor(self.precision, old_ints).reduce(ops.add, reduced_vars)
        assert info_vec.inputs == new_ints
        assert precision.inputs == new_ints
        return Gaussian(info_vec.data, precision.data, inputs)

    return None  # defer to default implementation
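The ``logaddexp`` branch is the information-form Schur complement: dropping a block ``b`` yields precision ``P_aa - P_ab @ inv(P_bb) @ P_ba``, information vector ``i_a - P_ab @ inv(P_bb) @ i_b``, and a log-normalizer term. A minimal numpy sketch (hypothetical helper name, dense ``inv`` in place of the batched Cholesky solves used above) that cross-checks this algebra against marginalizing in mean/covariance form:

import numpy as np

def marginalize_info_form(info_vec, precision, keep, drop):
    # Same algebra as the logaddexp branch above, without batching or
    # the Cholesky-based solves.
    P_ab = precision[np.ix_(keep, drop)]
    P_bb_inv = np.linalg.inv(precision[np.ix_(drop, drop)])
    new_precision = precision[np.ix_(keep, keep)] - P_ab @ P_bb_inv @ P_ab.T
    new_info_vec = info_vec[keep] - P_ab @ P_bb_inv @ info_vec[drop]
    # Constant picked up by integrating out the dropped block; compare the
    # log_prob term above, where _log_det_tri(prec_b) == 0.5 * logdet(P_bb).
    log_norm = 0.5 * (len(drop) * np.log(2 * np.pi)
                      - np.linalg.slogdet(precision[np.ix_(drop, drop)])[1]
                      + info_vec[drop] @ P_bb_inv @ info_vec[drop])
    return new_info_vec, new_precision, log_norm

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4))
precision = A @ A.T + 0.5 * np.eye(4)
info_vec = rng.normal(size=4)
keep, drop = [0, 1], [2, 3]
i_new, P_new, _ = marginalize_info_form(info_vec, precision, keep, drop)

# Cross-check in mean/covariance form: the marginal covariance is a subblock.
cov = np.linalg.inv(precision)
mean = cov @ info_vec
P_check = np.linalg.inv(cov[np.ix_(keep, keep)])
assert np.allclose(P_new, P_check)
assert np.allclose(i_new, P_check @ mean[keep])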
def _mv(mat, vec):
    """
    Computes a batched matrix-vector product ``mat @ vec``.
    """
    return ops.matmul(mat, ops.unsqueeze(vec, -1)).squeeze(-1)
def _vv(vec1, vec2):
    """
    Computes the inner product ``< vec1 | vec2 >``.
    """
    return ops.matmul(ops.unsqueeze(vec1, -2), ops.unsqueeze(vec2, -1)).squeeze(-1).squeeze(-1)
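A quick sanity check of these helpers (assuming funsor's ``ops`` dispatches on plain numpy arrays, as it does under the default numpy backend):

import numpy as np

mat = np.eye(3)
vec = np.array([1.0, 2.0, 3.0])
assert np.allclose(_mv(mat, vec), vec)   # identity matrix: mat @ vec == vec
assert np.allclose(_vv(vec, vec), 14.0)  # 1 + 4 + 9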