def m2(self):
    """matrix: Second moment."""
    if self._m2 is None:
        self._m2 = B.cholsolve(B.chol(self.prec), self.prec + B.outer(self.lam))
        self._m2 = B.cholsolve(B.chol(self.prec), B.transpose(self._m2))
    return self._m2
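# A minimal NumPy sketch of the identity the two Cholesky solves above compute.
# With the natural-parameter convention used in this class (prec = inv(Sigma),
# lam = inv(Sigma) @ mean; see `from_normal` and `mean`), solving
# prec @ X = prec + lam lam^T and then prec @ M2 = X^T yields
# M2 = Sigma + mean mean^T, i.e. the second moment E[x x^T]. The concrete
# numbers below are arbitrary illustrative values.
import numpy as np

sigma = np.array([[2.0, 0.5], [0.5, 1.0]])  # Covariance.
mean = np.array([[1.0], [-2.0]])
prec = np.linalg.inv(sigma)                 # First natural parameter.
lam = prec @ mean                           # Second natural parameter.

x = np.linalg.solve(prec, prec + lam @ lam.T)
m2 = np.linalg.solve(prec, x.T)
assert np.allclose(m2, sigma + mean @ mean.T)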
def kl(self, other: "NaturalNormal"):
    """Compute the Kullback-Leibler divergence with respect to another normal
    distribution parametrised by its natural parameters.

    Args:
        other (:class:`.NaturalNormal`): Other.

    Returns:
        scalar: KL divergence with respect to `other`.
    """
    ratio = B.solve(B.chol(self.prec), B.chol(other.prec))
    diff = self.mean - other.mean
    return 0.5 * (
        B.sum(ratio**2)
        - B.logdet(B.mm(ratio, ratio, tr_a=True))
        + B.sum(B.mm(other.prec, diff) * diff)
        - B.cast(self.dtype, self.dim)
    )
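# A hedged NumPy check of the expression above. Writing P_p = self.prec,
# P_q = other.prec, the code evaluates
#   KL(p || q) = 0.5 * (tr(P_q Sigma_p) + ln det P_p - ln det P_q
#                + (m_p - m_q)^T P_q (m_p - m_q) - d),
# which matches the textbook Gaussian KL divergence. The random matrices
# below are just a way to manufacture positive-definite test inputs.
import numpy as np

d = 3
rng = np.random.default_rng(0)
a, b = rng.normal(size=(d, d)), rng.normal(size=(d, d))
prec_p, prec_q = a @ a.T + d * np.eye(d), b @ b.T + d * np.eye(d)
m_p, m_q = rng.normal(size=d), rng.normal(size=d)

ratio = np.linalg.solve(np.linalg.cholesky(prec_p), np.linalg.cholesky(prec_q))
diff = m_p - m_q
kl = 0.5 * (
    np.sum(ratio**2)
    - np.linalg.slogdet(ratio.T @ ratio)[1]
    + diff @ prec_q @ diff
    - d
)

# Compare against the covariance-based closed form.
sigma_p = np.linalg.inv(prec_p)
kl_ref = 0.5 * (
    np.trace(prec_q @ sigma_p)
    + diff @ prec_q @ diff
    - d
    + np.linalg.slogdet(prec_p)[1]
    - np.linalg.slogdet(prec_q)[1]
)
assert np.isclose(kl, kl_ref)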
def sample(model, t, noise_f):
    """Sample from a model.

    Args:
        model (:class:`gpcm.model.AbstractGPCM`): Model to sample from.
        t (vector): Time points to sample at.
        noise_f (vector): Noise for the sample of the function. Should have the
            same size as `t`.

    Returns:
        tuple[vector]: Tuple containing kernel samples and function samples.
    """
    ks, fs = [], []
    with wbml.out.Progress(name="Sampling", total=5) as progress:
        for i in range(5):
            # Sample kernel.
            u = B.sample(model.compute_K_u())[:, 0]
            K = model.kernel_approx(t, t, u)
            wbml.out.kv("Sampled variance", K[0, 0])
            K = K / K[0, 0]
            ks.append(K[0, :])

            # Sample function.
            f = B.matmul(B.chol(closest_psd(K)), noise_f)
            fs.append(f)

            progress()
    return ks, fs
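# Sketch of the sampling trick in the function-sample step above, with NumPy
# stand-ins for `B` and `closest_psd` (a jitter term plays that role here):
# if `noise_f` is standard normal, then chol(K) @ noise_f has covariance
# chol(K) chol(K)^T = K, i.e. it is a draw from N(0, K). The EQ kernel and
# its lengthscale below are arbitrary illustrative choices.
import numpy as np

t = np.linspace(0, 1, 50)
K = np.exp(-0.5 * (t[:, None] - t[None, :]) ** 2 / 0.1**2)  # EQ kernel.
K = K + 1e-8 * np.eye(len(t))  # Jitter, standing in for closest_psd.
noise_f = np.random.default_rng(0).normal(size=len(t))
f = np.linalg.cholesky(K) @ noise_f  # A draw from N(0, K).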
def pinv(a: AbstractMatrix):
    """Compute the left pseudo-inverse.

    Args:
        a (matrix): Matrix to compute left pseudo-inverse of.

    Returns:
        matrix: Left pseudo-inverse of `a`.
    """
    return B.cholsolve(B.chol(B.matmul(a, a, tr_a=True)), B.transpose(a))
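# Quick NumPy check of the left pseudo-inverse (assuming `a` has full column
# rank, which the Cholesky factorisation above requires): the function
# computes (a^T a)^{-1} a^T, so pinv(a) @ a should be the identity.
import numpy as np

a = np.random.default_rng(0).normal(size=(5, 3))
pinv_a = np.linalg.solve(a.T @ a, a.T)  # Same result as the Cholesky solve.
assert np.allclose(pinv_a @ a, np.eye(3))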
def sample(model, t, noise_f):
    """Sample from a model.

    Args:
        model (:class:`gpcm.model.AbstractGPCM`): Model to sample from.
        t (vector): Time points to sample at.
        noise_f (vector): Noise for the sample of the function. Should have the
            same size as `t`.

    Returns:
        tuple[vector, ...]: Tuple containing kernel samples, filter samples, and
            function samples.
    """
    ks, us, fs = [], [], []

    # In the below, we look at the third inducing point, because that is the one
    # determining the value of the filter at zero: the CGPCM adds two extra
    # inducing points to the left.

    # Get a smooth sample.
    u1 = B.ones(model.n_u)
    while B.abs(u1[2]) > 1e-2:
        u1 = B.sample(model.compute_K_u())[:, 0]
    u = GP(model.k_h())
    u = u | (u(model.t_u), u1)
    u1_full = u(t).mean.flatten()

    # Get a rough sample.
    u2 = B.zeros(model.n_u)
    while u2[2] < 0.5:
        u2 = B.sample(model.compute_K_u())[:, 0]
    u = GP(model.k_h())
    u = u | (u(model.t_u), u2)
    u2_full = u(t).mean.flatten()

    with wbml.out.Progress(name="Sampling", total=5) as progress:
        for c in [0, 0.1, 0.23, 0.33, 0.5]:
            # Sample kernel.
            K = model.kernel_approx(t, t, c * u2 + (1 - c) * u1)
            wbml.out.kv("Sampled variance", K[0, 0])
            K = K / K[0, 0]
            ks.append(K[0, :])

            # Store filter.
            us.append(c * u2_full + (1 - c) * u1_full)

            # Sample function.
            f = B.matmul(B.chol(closest_psd(K)), noise_f)
            fs.append(f)

            progress()
    return ks, us, fs
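# The two while-loops above are a simple rejection scheme: keep redrawing
# until the inducing-point weight that controls the filter at zero satisfies
# the desired constraint. A self-contained NumPy sketch of the same pattern,
# with a standard normal standing in for B.sample(model.compute_K_u()) and
# the threshold 0.5 taken from the code above:
import numpy as np

rng = np.random.default_rng(0)
u2 = np.zeros(5)
while u2[2] < 0.5:  # Redraw until the third coordinate is large enough.
    u2 = rng.normal(size=5)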
def from_normal(cls, dist):
    """Construct from a normal distribution.

    Args:
        dist (distribution): Normal distribution to construct from.

    Returns:
        :class:`.NaturalNormal`: Normal distribution parametrised by the natural
            parameters of `dist`.
    """
    return cls(B.cholsolve(B.chol(dist.var), dist.mean), B.pd_inv(dist.var))
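# NumPy sketch of the conversion: for a normal with covariance `var` and mean
# `mean`, the natural parameters are lam = inv(var) @ mean and
# prec = inv(var), which is exactly what the Cholesky solve and pd_inv above
# produce. The concrete values are arbitrary illustrative inputs.
import numpy as np

var = np.array([[2.0, 0.5], [0.5, 1.0]])
mean = np.array([[1.0], [3.0]])
lam = np.linalg.solve(var, mean)  # Plays the role of B.cholsolve(B.chol(var), mean).
prec = np.linalg.inv(var)         # Plays the role of B.pd_inv(var).
assert np.allclose(np.linalg.solve(prec, lam), mean)  # Recovers the mean.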
def sample(self, state: B.RandomState, num: int = 1):
    """Sample.

    Args:
        state (random state): Random state.
        num (int): Number of samples.

    Returns:
        tuple[random state, tensor]: Random state and sample.
    """
    state, noise = Normal(self.prec).sample(state, num)
    sample = B.cholsolve(B.chol(self.prec), B.add(noise, self.lam))
    # Remove the matrix type if there is no structure. This eases working with
    # JITs, which aren't happy with matrix types.
    if not structured(sample):
        sample = B.dense(sample)
    return state, sample
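# Why this produces correct samples (NumPy sketch with arbitrary test values):
# if noise ~ N(0, P) with P = prec, then P^{-1}(noise + lam) has mean
# P^{-1} lam and covariance P^{-1} P P^{-1} = P^{-1}, i.e. it is a draw from
# N(mean, P^{-1}), matching the `mean` property below.
import numpy as np

rng = np.random.default_rng(0)
prec = np.array([[2.0, 0.5], [0.5, 1.0]])
lam = np.array([[1.0], [-1.0]])
chol_prec = np.linalg.cholesky(prec)
noise = chol_prec @ rng.normal(size=(2, 100_000))  # noise ~ N(0, prec).
samples = np.linalg.solve(prec, noise + lam)
# The empirical mean should approach inv(prec) @ lam.
assert np.allclose(samples.mean(axis=1, keepdims=True),
                   np.linalg.solve(prec, lam), atol=0.05)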
def mean(self):
    """column vector: Mean."""
    if self._mean is None:
        self._mean = B.cholsolve(B.chol(self.prec), self.lam)
    return self._mean