import numpy as np


def cheatPrecisionHelper(x, N):
    # A quick fix for numerical precision issues. In the future, don't
    # use this; use a more stable algorithm instead.
    # Assumes that x is an (N, N) PSD matrix.
    x = (x + x.T) / 2.0
    x[np.diag_indices(N)] += np.ones(N) * 1e-8
    return x
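A minimal usage sketch, assuming only NumPy and the helper above: symmetrize and jitter a covariance that is PSD only up to round-off so that a Cholesky factorization succeeds (the matrix here is hypothetical).

A = np.random.randn(4, 4)
cov = A @ A.T - 1e-12 * np.eye(4)       # tiny negative eigenvalue from noise
cov = cheatPrecisionHelper(cov, 4)      # symmetrize + add 1e-8 to the diagonal
L = np.linalg.cholesky(cov)             # now safe to factor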
def params2chol(params, D):
    # Unpack a flat parameter vector into an upper-triangular Cholesky
    # factor; the diagonal is exponentiated to keep it positive.
    R = np.zeros((D, D))
    triu_inds = np.triu_indices(D)
    diag_inds = np.diag_indices(D)
    R[triu_inds] = params.copy()
    R[diag_inds] = np.exp(R[diag_inds])
    return R
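A usage sketch under the layout the code implies: the D * (D + 1) / 2 entries fill the upper triangle row-wise (np.triu_indices order), and exponentiating the diagonal keeps the factor valid.

import numpy as np

D = 3
params = np.random.randn(D * (D + 1) // 2)  # unconstrained parameter vector
R = params2chol(params, D)                  # upper-triangular, positive diagonal
assert np.all(np.diag(R) > 0)
cov = R.T @ R                               # a valid PSD covariance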
def add_jitter(kernel, jitter=1e-5):
    # Add jitter to the diagonal for numerical stability.
    diag_indices = np.diag_indices(np.min(kernel.shape[:2]))
    to_add = np.zeros_like(kernel)
    to_add[diag_indices] += jitter
    kernel = kernel + to_add
    return kernel
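A usage sketch, assuming a square RBF Gram matrix (the data here is hypothetical); taking np.min(kernel.shape[:2]) lets the helper also handle non-square blocks by jittering only the leading diagonal.

import numpy as np

X = np.random.randn(50, 2)
sq_dists = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
K = np.exp(-0.5 * sq_dists)        # RBF Gram matrix, often singular to round-off
K = add_jitter(K, jitter=1e-5)     # lift the diagonal so Cholesky succeeds
L = np.linalg.cholesky(K)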
def KL_via_sampling(params, eps):
    # TODO: also need to include lognormal as a replacement for the gamma
    # distribution.
    # Known issue: this is giving log of negatives.
    # Monte Carlo estimate of KL( q || prior ) with q reparameterized as
    # beta = mu + Sigma @ eps, eps ~ N(0, I).
    d = np.shape(params)[0] - 1
    mu = params[0:d, 0]
    Sigma = params[0:d, 1:d + 1]
    di = np.diag_indices(d)
    # Note: Sigma is a view into params, so this writes through to params.
    Sigma[di] = np.exp(Sigma[di])
    muPrior = np.zeros(d)
    sigmaPrior = np.identity(d)
    E = 0.0
    for j in range(np.shape(eps)[0]):
        beta = mu + np.dot(Sigma, eps[j, :])
        E += np.log(
            normal_pdf(beta, mu, Sigma) / normal_pdf(beta, muPrior, sigmaPrior))
    # Average over the samples (np.mean of a scalar sum was a no-op).
    E = E / np.shape(eps)[0]
    return E
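As a sanity check for the estimator above, the KL between N(mu, S S^T) and N(0, I) has a closed form; a minimal sketch, assuming the Sigma built in the code plays the role of the factor S used to draw beta = mu + S @ eps:

import numpy as np

def kl_gaussian_closed_form(mu, S):
    # KL( N(mu, S S^T) || N(0, I) )
    #   = 0.5 * ( tr(S S^T) + mu^T mu - d - log|S S^T| )
    d = mu.shape[0]
    cov = S @ S.T
    _, logdet = np.linalg.slogdet(cov)
    return 0.5 * (np.trace(cov) + mu @ mu - d - logdet)

If normal_pdf expects a covariance rather than a factor, evaluating it with S @ S.T instead of Sigma would make the densities consistent with the sampling step, which may also explain the "log of negatives" note.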
def backwardStep(self, t, beta):
    J, h, logZ = beta
    u = self.u[t]

    J = J + self.Jy
    h = h + self.hy[t + 1]

    # H = np.linalg.solve( J.T + self.J11.T, J.T ).T
    H = rightSolve(J + self.J11, J)
    L = -H
    L[np.diag_indices(L.shape[0])] += 1

    _J = L @ J @ L.T
    _J += H @ self.J11 @ H.T
    _J = self.A.T @ _J @ self.A

    _h = h - J @ u
    _h = self.A.T @ L @ _h

    if self.computeMarginal:
        # Transition, emission and last
        _logZ = 0.5 * u.dot(self.J11.dot(u)) + \
            self.log_Z + self.log_Zy[t + 1] + logZ

        JInt = J + self.J11
        hInt = self.J11.dot(u) + h
        JChol = cho_factor(JInt, lower=True)
        JInvh = cho_solve(JChol, hInt)

        # Marginalization
        _logZ += -0.5 * hInt.dot(JInvh) + \
            np.log(np.diag(JChol[0])).sum() - \
            self.D_latent * _HALF_LOG_2_PI
    else:
        _logZ = 0

    return _J, _h, _logZ
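rightSolve is not defined in these snippets, but the commented-out line records its intent; a minimal sketch under that assumption (right division: solve X @ A = B for X), together with an assumed definition of the _HALF_LOG_2_PI constant. cho_factor and cho_solve are the standard SciPy routines (from scipy.linalg import cho_factor, cho_solve).

import numpy as np

_HALF_LOG_2_PI = 0.5 * np.log(2.0 * np.pi)  # assumed definition

def rightSolve(A, B):
    # Solve X @ A = B, i.e. X = B @ inv(A), matching the commented-out
    # np.linalg.solve(A.T, B.T).T formulation in backwardStep.
    return np.linalg.solve(A.T, B.T).T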
def mat_from_diag_triu_tril(diag, tri_upp, tri_low):
    """Build matrix from given components.

    Forms a matrix from the diagonal, strictly upper triangular and
    strictly lower triangular parts.

    Parameters
    ----------
    diag : array_like, shape=[..., n]
    tri_upp : array_like, shape=[..., (n * (n - 1)) / 2]
    tri_low : array_like, shape=[..., (n * (n - 1)) / 2]

    Returns
    -------
    mat : array_like, shape=[..., n, n]
    """
    n = diag.shape[-1]
    (i,) = np.diag_indices(n, ndim=1)
    j, k = np.triu_indices(n, k=1)
    mat = np.zeros(diag.shape + (n,))
    mat[..., i, i] = diag
    mat[..., j, k] = tri_upp
    mat[..., k, j] = tri_low
    return mat
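A round-trip sketch, NumPy only: split a matrix into its three parts and rebuild it.

import numpy as np

n = 3
M = np.arange(9, dtype=float).reshape(n, n)
j, k = np.triu_indices(n, k=1)
rebuilt = mat_from_diag_triu_tril(np.diag(M), M[j, k], M[k, j])
assert np.allclose(rebuilt, M)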
def forwardStep(self, t, alpha):
    J, h, logZ = alpha
    u = self.u[t - 1]

    M = self.AInv.T @ J @ self.AInv

    # H = np.linalg.solve( M.T + self.J11.T, M.T ).T
    H = rightSolve(M + self.J11, M)
    L = -H
    L[np.diag_indices(L.shape[0])] += 1

    _J = L @ M @ L.T
    _J += H @ self.J11 @ H.T
    _J += self.Jy

    _h = h + J @ self.AInv.dot(u)
    _h = L @ self.AInv.T @ _h
    _h += self.hy[t]

    # Transition and last
    _logZ = 0.5 * u.dot(self.J11.dot(u)) + self.log_Z + logZ

    JInt = J + self.J22
    hInt = self.J12.T.dot(u) + h
    JChol = cho_factor(JInt, lower=True)
    JInvh = cho_solve(JChol, hInt)

    # Marginalization
    _logZ += -0.5 * hInt.dot(JInvh) + \
        np.log(np.diag(JChol[0])).sum() - \
        self.D_latent * _HALF_LOG_2_PI

    # Emission
    _logZ += self.log_Zy[t]

    return _J, _h, _logZ
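The np.log(np.diag(JChol[0])).sum() term in both steps is the half-log-determinant identity log|J| = 2 * sum(log diag(L)) for a Cholesky factorization J = L L^T; a quick numerical check:

import numpy as np
from scipy.linalg import cho_factor

A = np.random.randn(5, 5)
J = A @ A.T + 5.0 * np.eye(5)             # a well-conditioned SPD matrix
L = cho_factor(J, lower=True)[0]          # cho_factor returns (c, lower)
half_logdet = np.log(np.diag(L)).sum()    # the term used in _logZ
assert np.isclose(2.0 * half_logdet, np.linalg.slogdet(J)[1])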
def chol2params(chol, dchol, D):
    # Adjoint of params2chol: chain rule for the exp transform on the
    # diagonal (d exp(p) / d p = exp(p) = chol[diag]), then flatten the
    # upper triangle back into a parameter vector. Mutates dchol in place.
    triu_inds = np.triu_indices(D)
    diag_inds = np.diag_indices(D)
    dchol[diag_inds] = chol[diag_inds] * dchol[diag_inds]
    params = dchol[triu_inds].copy()
    return params
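A round-trip sketch, assuming chol2params is meant as the adjoint of params2chol above (the pairing is an inference from the matching index layout):

import numpy as np

D = 2
p = np.random.randn(D * (D + 1) // 2)
R = params2chol(p, D)
dR = np.ones((D, D))                # some upstream gradient w.r.t. R
dp = chol2params(R, dR.copy(), D)   # .copy() since dchol is mutated in place
assert dp.shape == p.shape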