def __init__(self, offset, scale, delta, X):
    """Build the prior covariance from the design matrix.

    The stored covariance is ``scale * ((1 - delta) * X Xᵀ + delta * I)``,
    computed on the transformed design matrix; ``offset`` is kept for later
    use by the instance.
    """
    trans = DesignMatrixTrans(X)
    Xt = trans.transform(X)

    self._offset = offset

    cov = Xt.dot(Xt.T)
    cov *= 1 - delta
    sum2diag(cov, delta, out=cov)
    cov *= scale
    self._cov = cov
def sample(self, random_state=None):
    r"""Draw one sample from the specified distribution.

    Parameters
    ----------
    random_state : random_state
        Set the initial random state.

    Returns
    -------
    numpy.ndarray
        Sample.
    """
    from numpy_sugar import epsilon
    from numpy_sugar.linalg import sum2diag
    from numpy_sugar.random import multivariate_normal

    if random_state is None:
        random_state = RandomState()

    mean = self._mean.value()
    # Work on a copy so the stored covariance is left untouched.
    cov = self._cov.value().copy()
    # Small diagonal jitter for numerical stability of the factorization.
    sum2diag(cov, +epsilon.small, out=cov)

    latent = multivariate_normal(mean, cov, random_state)
    return self._lik.sample(latent, random_state)
def _normal_lml(self):
    """Log marginal likelihood via an equivalent normal-likelihood LMM.

    The EP site parameters are folded into a transformed phenotype, mean
    and covariance, which are then handed to FastLMM.
    """
    self._update()

    mean = self.m()
    tau = self._sitelik_tau
    eta = self._sitelik_eta

    # New phenotype.
    y = eta.copy()
    # New mean.
    mean = tau * mean
    # New covariance.
    cov = self.K()
    cov = ddot(tau, ddot(cov, tau, left=False), left=True)
    sum2diag(cov, tau, out=cov)

    (Q0, Q1), S0 = economic_qs(cov)

    from numpy import newaxis

    from ...lmm import FastLMM

    flmm = FastLMM(y, Q0, Q1, S0, covariates=mean[:, newaxis])
    flmm.learn(progress=False)
    return flmm.lml()
def sample(self, random_state=None):
    """Sample an outcome through the likelihood given a latent draw."""
    if random_state is None:
        random_state = RandomState()

    mean = self._mean.feed('sample').value()
    cov = self._cov.feed('sample').value()

    # Temporarily jitter the diagonal so the factorization succeeds, then
    # undo it (``cov`` may be an array owned by the covariance object).
    sum2diag(cov, +epsilon.small, out=cov)
    latent = multivariate_normal(mean, cov, random_state)
    sum2diag(cov, -epsilon.small, out=cov)

    return self._lik.sample(latent, random_state)
def value(self):
    r"""Log of the marginal likelihood.

    Formally,

    .. math::

        - \frac{n}{2}\log{2\pi} - \frac{1}{2}
        \log{\left| v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right|}
        - \frac{1}{2}
        \left(\tilde{\boldsymbol\mu} -
        \mathrm X\boldsymbol\beta\right)^{\intercal}
        \left( v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right)^{-1}
        \left(\tilde{\boldsymbol\mu} - \mathrm X\boldsymbol\beta\right)

    Returns
    -------
    float
        :math:`\log{p(\tilde{\boldsymbol\mu})}`
    """
    from numpy_sugar.linalg import ddot, sum2diag

    cached = self._cache["value"]
    if cached is not None:
        return cached

    s = exp(self.logscale)
    d = 1 / (1 + exp(-self.logitdelta))

    mu = self.eta / self.tau
    n = len(mu)
    if self._QS is None:
        K = zeros((n, n))
    else:
        Q0, S0 = self._QS[0][0], self._QS[1]
        K = dot(ddot(Q0, S0), Q0.T)

    # A = v0*K + v1*I + diag(1/tau), with v0 = s*(1-d) and v1 = s*d.
    A = sum2diag(sum2diag(s * (1 - d) * K, s * d), 1 / self.tau)
    r = mu - self.mean()

    lml = -n * log(2 * pi) - slogdet(A)[1] - dot(r, solve(A, r))
    self._cache["value"] = lml / 2
    return self._cache["value"]
def gradient(self):
    """Gradient of the log marginal likelihood.

    Returns a dict with entries ``logitdelta``, ``logscale`` and ``beta``,
    obtained by chaining the scale/delta derivatives through the
    log/logit reparameterizations.
    """
    from numpy_sugar.linalg import ddot, sum2diag

    cached = self._cache["grad"]
    if cached is not None:
        return cached

    s = exp(self.logscale)
    d = 1 / (1 + exp(-self.logitdelta))
    v0 = s * (1 - d)
    v1 = s * d

    mu = self.eta / self.tau
    n = len(mu)
    if self._QS is None:
        K = zeros((n, n))
    else:
        Q0, S0 = self._QS[0][0], self._QS[1]
        K = dot(ddot(Q0, S0), Q0.T)

    A = sum2diag(sum2diag(v0 * K, v1), 1 / self.tau)
    X = self._X
    m = mu - self.mean()
    Aim = solve(A, m)

    # Derivatives of the covariance w.r.t. scale and delta.
    Kd0 = sum2diag((1 - d) * K, d)
    Kd1 = sum2diag(-s * K, s)

    g = dict()
    g["beta"] = dot(m, solve(A, X))
    g["scale"] = (-trace(solve(A, Kd0)) + dot(Aim, dot(Kd0, Aim))) / 2
    g["delta"] = (-trace(solve(A, Kd1)) + dot(Aim, dot(Kd1, Aim))) / 2

    # Chain rule: d(delta)/d(logitdelta) = ed/(1+ed)^2, d(scale)/d(logscale) = es.
    ed = exp(-self.logitdelta)
    es = exp(self.logscale)

    grad = {
        "logitdelta": g["delta"] * (ed / (1 + ed)) / (1 + ed),
        "logscale": g["scale"] * es,
        "beta": g["beta"],
    }
    self._cache["grad"] = grad
    return self._cache["grad"]
def _L(self):
    r"""Returns the Cholesky factorization of :math:`\mathcal B`.

    .. math::

        \mathcal B = \mathrm Q^{\intercal}\mathcal A\mathrm Q
            (\sigma_b^2 \mathrm S)^{-1}
    """
    Q = self._Q
    B = dot(Q.T, ddot(self._A(), Q, left=True))
    sum2diag(B, 1. / (self.sigma2_b * self._S), out=B)
    return cho_factor(B, lower=True)[0]
def K(self):
    r"""Covariance matrix of the prior.

    Returns:
        :math:`\sigma_b^2 \mathrm Q_0 \mathrm S_0 \mathrm Q_0^{\intercal}
        + \sigma_{\epsilon}^2 \mathrm I`.
    """
    QSQt = self._QSQt()
    return sum2diag(self.sigma2_b * QSQt, self.sigma2_epsilon)
def test_check_definite_positiveness():
    """An SPD matrix is accepted; the zero matrix is rejected."""
    random = RandomState(6)
    G = random.randn(3, 3)
    spd = sum2diag(dot(G, G.T), 1e-4)
    assert_(check_definite_positiveness(spd))
    assert_(not check_definite_positiveness(zeros((4, 4))))
def nice_inv(A):
    """Numerically nice inverse of ``A``.

    Adds a tiny ridge (1e-12) to the diagonal before taking the
    Moore-Penrose pseudo-inverse, which stabilizes inversion of
    near-singular matrices.

    Parameters
    ----------
    A : array_like
        Square matrix.

    Returns
    -------
    numpy.ndarray
        Pseudo-inverse of ``A + 1e-12 * I``.
    """
    from numpy import asarray, eye

    A = asarray(A, float)
    # Equivalent to numpy_sugar.linalg.sum2diag(A, 1e-12), but a plain
    # numpy expression avoids the extra dependency for a one-liner.
    return pinv(A + 1e-12 * eye(A.shape[0]))
def test_sum2diag():
    """sum2diag adds a vector or scalar to the diagonal and honors ``out=``."""
    random = RandomState(0)
    A = random.randn(2, 2)
    b = random.randn(2)

    expected = A.copy()
    expected[0, 0] += b[0]
    expected[1, 1] += b[1]
    assert_allclose(sum2diag(A, b), expected)

    want = array([[2.76405235, 0.40015721], [0.97873798, 3.2408932]])
    assert_allclose(sum2diag(A, 1), want)

    out = empty((2, 2))
    sum2diag(A, b, out=out)
    assert_allclose(expected, out)
def LU(self):
    r"""LU factor of :math:`\mathrm B`.

    .. math::

        \mathrm B = \mathrm Q^{\intercal}\tilde{\mathrm{T}}\mathrm Q
            + \mathrm{S}^{-1}
    """
    from numpy_sugar.linalg import ddot, sum2diag

    if self._LU_cache is None:
        Q = self._cov["QS"][0][0]
        S = self._cov["QS"][1]
        B = dot(Q.T, ddot(self._site.tau, Q, left=True))
        sum2diag(B, 1.0 / S, out=B)
        self._LU_cache = lu_factor(B, overwrite_a=True, check_finite=False)
    return self._LU_cache
def L(self):
    r"""Cholesky decomposition of :math:`\mathrm B`.

    .. math::

        \mathrm B = \mathrm Q^{\intercal}\tilde{\mathrm{T}}\mathrm Q
            + \mathrm{S}^{-1}
    """
    from scipy.linalg import cho_factor
    from numpy_sugar.linalg import ddot, sum2diag

    if self._L_cache is None:
        Q = self._cov["QS"][0][0]
        S = self._cov["QS"][1]
        B = dot(Q.T, ddot(self._site.tau, Q, left=True))
        sum2diag(B, 1.0 / S, out=B)
        self._L_cache = cho_factor(B, lower=True)[0]
    return self._L_cache
def covariance(self):
    r"""Covariance of the prior.

    Returns
    -------
    :class:`numpy.ndarray`
        :math:`v_0 \mathrm K + v_1 \mathrm I`.
    """
    from numpy_sugar.linalg import ddot, sum2diag

    Q0, S0 = self._QS[0][0], self._QS[1]
    K0 = dot(ddot(Q0, self.v0 * S0), Q0.T)
    return sum2diag(K0, self.v1)
def get_normal_likelihood_trick(self):
    # The EP approximation implies the normal model
    #     tilde_mu ~ N(m, K + tilde_Sigma),   tilde_Sigma = diag(1/ttau).
    # We re-express it in an equivalent, more robust form
    #     tilde_y ~ N(tilde_m, tilde_K + tilde_Sigma^{-1})
    # with
    #     tilde_y = tilde_Sigma^{-1} tilde_mu  (= teta),
    #     tilde_m = tilde_Sigma^{-1} m,
    #     tilde_K = tilde_Sigma^{-1} K tilde_Sigma^{-1}.
    mean = self.m()
    tau = self._sitelik_tau
    eta = self._sitelik_eta

    # New phenotype.
    y = eta.copy()
    # New mean.
    mean = tau * mean
    # New covariance.
    cov = self.K()
    cov = ddot(tau, ddot(cov, tau, left=False), left=True)
    sum2diag(cov, tau, out=cov)

    (Q0, Q1), S0 = economic_qs(cov)

    from numpy import newaxis

    from ...lmm import FastLMM

    flmm = FastLMM(y, Q0, Q1, S0, covariates=mean[:, newaxis])
    flmm.learn(progress=False)
    return flmm.get_normal_likelihood_trick()
def covariance(self):
    """
    Covariance of the prior.

    Returns
    -------
    covariance : ndarray
        v₀𝙺 + v₁𝙸.
    """
    from numpy_sugar.linalg import ddot, sum2diag

    K0 = dot(ddot(self._Q0, self.v0 * self._S0), self._Q0.T)
    return sum2diag(K0, self.v1)
def predictive_covariance(self, Xstar, ks, kss):
    """Variances of the predictive distribution for new samples.

    Parameters
    ----------
    Xstar : array_like
        Covariates of the test samples (unused in this computation).
    ks : array_like
        Covariance between test and training samples.
    kss : array_like
        Prior variances of the test samples.

    Returns
    -------
    numpy.ndarray
        ``kss - diag(ks @ (K + diag(1/tau))^{-1} @ ksᵀ)``.
    """
    from numpy import einsum
    from numpy_sugar.linalg import sum2diag

    kss = self.variance_star(kss)
    ks = self.covariance_star(ks)
    tau = self._ep.posterior.tau

    K = GLMM.covariance(self)
    KT = sum2diag(K, 1 / tau)
    ktk = solve(KT, ks.T)

    # diag(ks @ ktk) computed directly; replaces the original per-row
    # Python loop with a single vectorized contraction.
    m = len(kss)
    b = einsum("ij,ji->i", ks[:m], ktk[:, :m])
    return kss - b
def L(self):
    r"""Cholesky decomposition of :math:`\mathrm B`.

    .. math::

        \mathrm B = \mathrm Q^{\intercal}\tilde{\mathrm{T}}\mathrm Q
            + \mathrm{S}^{-1}
    """
    from numpy_sugar.linalg import ddot, sum2diag

    if self._L_cache is None:
        scale = self._cov["scale"]
        delta = self._cov["delta"]
        Q = self._cov["QS"][0][0]
        S = self._cov["QS"][1]

        # Reuse the pre-allocated NxR/RxR buffers to avoid temporaries.
        ddot(self.A * self._site.tau, Q, left=True, out=self._NxR)
        B = dot(Q.T, self._NxR, out=self._RxR)
        B *= 1 - delta
        sum2diag(B, 1.0 / S / scale, out=B)
        self._L_cache = _cho_factor(B)
    return self._L_cache
def get_fast_scanner(self):
    r"""Return :class:`glimix_core.lmm.FastScanner` for the current delta."""
    from numpy_sugar.linalg import ddot, economic_qs, sum2diag

    y = self.eta / self.tau
    if self._QS is None:
        K = eye(y.shape[0]) / self.tau
    else:
        Q0, S0 = self._QS[0][0], self._QS[1]
        K = sum2diag(dot(ddot(Q0, self.v0 * S0), Q0.T), 1 / self.tau)
    return FastScanner(y, self._X, economic_qs(K), self.v1)
def predict(self, covariates, Cp, Cpp):
    """Predictive mean and covariance for new samples."""
    delta = self.delta
    CpQ0 = Cp.dot(self._Q0)
    CpQ1 = Cp.dot(self._Q1)

    mean = covariates.dot(self.beta)
    mean = mean + (1 - delta) * CpQ0.dot(self._Q0tymD0())
    mean += (1 - delta) * CpQ1.dot(self._Q1tymD1())

    cov = sum2diag(Cpp * (1 - self.delta), self.delta)
    cov -= (1 - delta)**2 * CpQ0.dot((CpQ0 / self._diag0).T)
    cov -= (1 - delta)**2 * CpQ1.dot((CpQ1 / self._diag1).T)
    cov *= self.scale

    return FastLMMPredictor(mean, cov)
def covariance(self):
    """Return ``K + diag(1 / ttau)``, i.e. the prior covariance plus the
    site-likelihood variances on the diagonal."""
    return sum2diag(self.K(), 1 / self._sitelik_tau)
def _terms(self):
    """Compute and cache the intermediate terms of the marginal likelihood.

    NOTE(review): the inline identity comments below were mojibake in the
    original; they have been reconstructed from the surrounding code.
    """
    from numpy_sugar.linalg import ddot, lu_slogdet, sum2diag

    if self._cache["terms"] is not None:
        return self._cache["terms"]

    L0 = self._cov.C0.L
    S, U = self._cov.C1.eigh()
    # W = U diag(1/S) Uᵀ, i.e. the inverse of C1.
    W = ddot(U, 1 / S) @ U.T
    S = 1 / sqrt(S)
    Y = self._Y
    A = self._mean.A
    WL0 = W @ L0
    YW = Y @ W
    WA = W @ A
    L0WA = L0.T @ WA
    # Z = (L0ᵀ W L0) ⊗ (GGᵀ-term) + I, LU-factored for repeated solves.
    Z = kron(L0.T @ WL0, self._GG)
    Z = sum2diag(Z, 1)
    Lz = lu_factor(Z, check_finite=False)

    # yᵀ R⁻¹ y = vec(YW)ᵀ vec(Y)
    yRiy = (YW * self._Y).sum()
    # Mᵀ R⁻¹ M = (Aᵀ W A) ⊗ (Xᵀ X)
    MRiM = kron(A.T @ WA, self._XX)
    # Xᵀ R⁻¹ y = vec(Gᵀ Y W L0)
    XRiy = vec(self._GY @ WL0)
    # Xᵀ R⁻¹ M = (L0ᵀ W A) ⊗ (Gᵀ X)
    XRiM = kron(L0WA, self._GX)
    # Mᵀ R⁻¹ y = vec(Xᵀ Y W A)
    MRiy = vec(self._XY @ WA)

    ZiXRiM = lu_solve(Lz, XRiM)
    ZiXRiy = lu_solve(Lz, XRiy)

    MRiXZiXRiy = ZiXRiM.T @ XRiy
    MRiXZiXRiM = XRiM.T @ ZiXRiM

    # Woodbury-style corrections of the R⁻¹ quadratic forms.
    yKiy = yRiy - XRiy @ ZiXRiy
    MKiy = MRiy - MRiXZiXRiy
    H = MRiM - MRiXZiXRiM
    Lh = lu_factor(H, check_finite=False)
    # Effect sizes solving H b = Mᵀ K⁻¹ y; stored back on the mean.
    b = lu_solve(Lh, MKiy)
    B = unvec(b, (self.ncovariates, -1))
    self._mean.B = B
    XRim = XRiM @ b
    ZiXRim = ZiXRiM @ b
    mRiy = b.T @ MRiy
    mRim = b.T @ MRiM @ b

    # log|K| via the LU factor of Z plus the eigenvalue contribution.
    logdetK = lu_slogdet(Lz)[1]
    logdetK -= 2 * log(S).sum() * self.nsamples

    mKiy = mRiy - XRim.T @ ZiXRiy
    mKim = mRim - XRim.T @ ZiXRim

    self._cache["terms"] = {
        "logdetK": logdetK,
        "mKiy": mKiy,
        "mKim": mKim,
        "b": b,
        "Z": Z,
        "B": B,
        "Lz": Lz,
        "S": S,
        "W": W,
        "WA": WA,
        "YW": YW,
        "WL0": WL0,
        "yRiy": yRiy,
        "MRiM": MRiM,
        "XRiy": XRiy,
        "XRiM": XRiM,
        "ZiXRiM": ZiXRiM,
        "ZiXRiy": ZiXRiy,
        "ZiXRim": ZiXRim,
        "MRiy": MRiy,
        "mRim": mRim,
        "mRiy": mRiy,
        "XRim": XRim,
        "yKiy": yKiy,
        "H": H,
        "Lh": Lh,
        "MRiXZiXRiy": MRiXZiXRiy,
        "MRiXZiXRiM": MRiXZiXRiM,
    }
    return self._cache["terms"]