def test_beta():
    """cholesky(A, beta=b) must factor A + b*I, for every factorization mode."""
    for matrix in [real_matrix(), complex_matrix()]:
        for beta in [0, 1, 3.4]:
            matrix_plus_beta = matrix + beta * sparse.eye(*matrix.shape)
            for mode in ["auto", "supernodal", "simplicial"]:
                # BUG FIX: 'mode' was iterated but never passed to cholesky(),
                # so every iteration exercised the same default mode.
                f = cholesky(matrix, beta=beta, mode=mode)
                f.L()  # force the numeric factor to be computed
                assert factor_of(f, matrix_plus_beta)
def test_complex():
    """A complex factor solves real and complex RHS; a real factor rejects complex."""
    cplx = complex_matrix()
    cplx_factor = cholesky(cplx)
    assert factor_of(cplx_factor, cplx)

    real = real_matrix()
    real_factor = cholesky(real)

    rhs = np.arange(4)
    assert_allclose(cplx_factor(rhs)[:, None],
                    cplx.todense().I * rhs[:, None])
    assert_allclose(cplx_factor(rhs * 1j)[:, None],
                    cplx.todense().I * (rhs * 1j)[:, None])
    assert_allclose(real_factor(rhs)[:, None],
                    real.todense().I * rhs[:, None])
    # If we did a real factorization, we can't do solves on complex arrays:
    assert_raises(CholmodError, real_factor, rhs * 1j)
def solve(self, other, left_array=None, logdet=False):
    """Solve self @ x = other via a sparse Cholesky factorization.

    If ``left_array`` is given, return ``left_array.T @ x`` instead of ``x``.
    If ``logdet`` is True, return a ``(result, logdet)`` tuple where the
    second entry is the log-determinant of ``self``.
    """
    factor = cholesky(self)
    result = factor(other)
    if left_array is not None:
        result = np.dot(left_array.T, result)
    if logdet:
        return result, factor.logdet()
    return result
def test_convenience():
    """det/logdet/slogdet/inv agree with dense linear algebra for every
    dtype, index width, ordering method, and factorization mode."""
    seed = np.array([[10, 0, 3, 0],
                     [0, 5, 0, -2],
                     [3, 0, 5, 0],
                     [0, -2, 0, 2]])
    orderings = ("natural", "amd", "metis", "nesdis", "colamd",
                 "default", "best")
    for dtype in (float, complex):
        dense = np.array(seed, dtype=dtype)
        spmat = sparse.csc_matrix(dense)
        for use_long in [False, True]:
            if use_long:
                spmat = convert_matrix_indices_to_long_indices(spmat)
            for ordering_method in orderings:
                for mode in ("simplicial", "supernodal"):
                    # Trace the configuration under test for debugging.
                    for item in ('----', dtype, spmat.indices.dtype,
                                 use_long, ordering_method, mode, '----'):
                        print(item)
                    f = cholesky(spmat, mode=mode,
                                 ordering_method=ordering_method)
                    print(f.D())
                    expected = np.linalg.det(dense)
                    assert_allclose(f.det(), expected)
                    assert_allclose(f.logdet(), np.log(expected))
                    assert_allclose(f.slogdet(), [1, np.log(expected)])
                    assert_allclose((f.inv() * spmat).todense(), np.eye(4))
def test_convenience():
    """det/logdet/slogdet/inv of the factor match dense linear algebra."""
    seed = np.array([[10, 0, 3, 0],
                     [0, 5, 0, -2],
                     [3, 0, 5, 0],
                     [0, -2, 0, 2]])
    for mode in ("simplicial", "supernodal"):
        for dtype in (float, complex):
            dense = np.array(seed, dtype=dtype)
            spmat = sparse.csc_matrix(dense)
            factor = cholesky(spmat, mode=mode)
            expected = np.linalg.det(dense)
            assert_allclose(factor.det(), expected)
            assert_allclose(factor.logdet(), np.log(expected))
            assert_allclose(factor.slogdet(), [1, np.log(expected)])
            assert_allclose((factor.inv() * spmat).todense(), np.eye(4))
def __init__(self, A):
    """Compute a sparse cholesky decomposition of the potential.

    Parameters
    ----------
    A : matrix, ndim = 2
        scaling matrix for the potential vector
    """
    self.A = A
    self.size = A.shape[0]
    self.factor = cholmod.cholesky(A)
    # Square roots of the diagonal D of the LDL' factorization.
    self.d_sqrt = np.sqrt(self.factor.D())
def test_beta():
    """beta shifts the factored matrix by beta*I for all option combinations."""
    for base in [real_matrix(), complex_matrix()]:
        for beta in [0, 1, 3.4]:
            shifted = base + beta * sparse.eye(*base.shape)
            for use_long in [False, True]:
                if use_long:
                    shifted = convert_matrix_indices_to_long_indices(shifted)
                for ordering_method in ("natural", "amd", "metis", "nesdis",
                                        "colamd", "default", "best"):
                    for mode in ["auto", "supernodal", "simplicial"]:
                        factor = cholesky(base, beta=beta, mode=mode,
                                          ordering_method=ordering_method)
                        factor.L()  # force computation of the numeric factor
                        assert factor_of(factor, shifted)
def test_update_downdate():
    """update_inplace with the permuted factor L adds/subtracts L L' == m."""
    m = real_matrix()
    factor = cholesky(m)
    permuted_L = factor.L()[factor.P(), :]
    assert factor_of(factor, m)
    # Each update adds permuted_L @ permuted_L.T == m to the factored matrix...
    for multiple in (2, 3):
        factor.update_inplace(permuted_L)
        assert factor_of(factor, multiple * m)
    # ...and each downdate removes it again.
    for multiple in (2, 1):
        factor.update_inplace(permuted_L, subtract=True)
        assert factor_of(factor, multiple * m)
def get_phiinv_sparse(self, params, logdet=False):
    """Invert phi with a sparse Cholesky factorization.

    Returns a list of per-component inverses when phi is a list, otherwise
    the inverse of the full sparse matrix (paired with its log-determinant
    when ``logdet`` is True).
    """
    phi = self.get_phi(params)
    if isinstance(phi, list):
        # Uncorrelated case: invert each phi vector independently,
        # preserving None placeholders.
        return [phivec.inv(logdet) if phivec is not None else None
                for phivec in phi]
    cf = cholesky(sps.csc_matrix(phi))
    return (cf.inv(), cf.logdet()) if logdet else cf.inv()
def test_cholesky_smoke_test():
    """Identity factor: solves return their input for dense, csc and csr RHS."""
    factor = cholesky(sparse.eye(10, 10))

    print("dense")
    dense_rhs = np.arange(20).reshape(10, 2)
    assert_allclose(factor(dense_rhs), dense_rhs)

    print("sparse")
    csc_rhs = sparse.csc_matrix(np.eye(10)[:, :2])
    csc_result = factor(csc_rhs)
    assert sparse.issparse(csc_result)
    assert_allclose(csc_result.todense(), csc_rhs.todense())

    print("csr")
    csr_rhs = csc_rhs.tocsr()
    csr_result = factor(csr_rhs)
    assert sparse.issparse(csr_result)
    assert_allclose(csr_result.todense(), csr_rhs.todense())

    print("extract")
    # The identity needs no fill-reducing permutation.
    assert np.all(factor.P() == np.arange(10))
def test_solve_edge_cases():
    """Shape and type handling of Factor.__call__."""
    m = real_matrix()
    factor = cholesky(m)
    n = m.shape[0]
    # sparse matrices give a sparse back:
    assert sparse.issparse(factor(sparse.eye(*m.shape).tocsc()))
    # dense matrices give a dense back:
    assert not sparse.issparse(factor(np.eye(*m.shape)))
    # 1d dense matrices are accepted and a 1d vector is returned (this
    # matches the behavior of np.dot):
    assert factor(np.arange(n)).shape == (n,)
    # 2d dense matrices are also accepted:
    assert factor(np.arange(n)[:, np.newaxis]).shape == (n, 1)
    # But not if the dimensions are wrong...:
    assert_raises(CholmodError, factor, np.arange(n + 1)[:, np.newaxis])
    assert_raises(CholmodError, factor, np.arange(n)[np.newaxis, :])
    assert_raises(CholmodError, factor,
                  np.arange(n)[:, np.newaxis, np.newaxis])
    # And ditto for the sparse version:
    assert_raises(CholmodError, factor, sparse.eye(n + 1, m.shape[1]).tocsc())
def __call__(self, xs, phiinv_method='partition'):
    """Evaluate the marginalized log-likelihood at parameter vector ``xs``.

    ``xs`` may be a dict of parameters or a flat vector (mapped via the
    PTA's ``map_params``). Returns ``-np.inf`` when a per-pulsar Sigma is
    not positive definite.
    """
    # map parameter vector if needed
    params = xs if isinstance(xs, dict) else self.pta.map_params(xs)

    # phiinvs will be a list or may be a big matrix if spatially
    # correlated signals
    TNrs = self.pta.get_TNr(params)
    TNTs = self.pta.get_TNT(params)
    phiinvs = self.pta.get_phiinv(params, logdet=True, method=phiinv_method)

    # get -0.5 * (rNr + logdet_N) piece of likelihood
    loglike = -0.5 * np.sum([l for l in self.pta.get_rNr_logdet(params)])

    # red noise piece
    if self.pta._commonsignals:
        # Spatially correlated signals: one big sparse Sigma.
        phiinv, logdet_phi = phiinvs
        Sigma = self._make_sigma(TNTs, phiinv)
        TNr = np.concatenate(TNrs)

        cf = cholesky(Sigma)
        expval = cf(TNr)
        logdet_sigma = cf.logdet()

        loglike += 0.5 * (np.dot(TNr, expval) - logdet_sigma - logdet_phi)
    else:
        # Uncorrelated signals: block-diagonal, solve per pulsar.
        for TNr, TNT, (phiinv, logdet_phi) in zip(TNrs, TNTs, phiinvs):
            Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)

            try:
                cf = sl.cho_factor(Sigma)
                expval = sl.cho_solve(cf, TNr)
            except (np.linalg.LinAlgError, ValueError):
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. cho_factor raises
                # LinAlgError for a non-positive-definite Sigma and
                # ValueError for non-finite entries; only those should
                # map to a -inf likelihood.
                return -np.inf

            logdet_sigma = np.sum(2 * np.log(np.diag(cf[0])))
            loglike += 0.5 * (np.dot(TNr, expval) - logdet_sigma - logdet_phi)

    return loglike
def f(s, y, C, D):
    """Objective 0.5*log|O| + 0.5*y' O^{-1} y with O = s[0]*C + s[1]*D.

    s = [s2, a2] holds the two hyperparameters scaling C and D.
    """
    O = s[0] * C + s[1] * D
    factor = cholesky(O)
    # The solve O^{-1} y is done by conjugate gradients; cg returns
    # (solution, info), we only need the solution.
    quad = y.dot(cg(O, y)[0])
    return 0.5 * factor.logdet() + 0.5 * quad
def __call__(self, mtx_m, mtx_d, mtx_k, n_eigs=None, eigenvectors=None,
             status=None, conf=None):
    """Solve the quadratic eigenvalue problem (e^2 M + e D + K) x = 0 by
    linearizing it to a generalized problem A v = e B v and delegating to
    ``self.solver``.

    Parameters
    ----------
    mtx_m, mtx_d, mtx_k : sparse matrices
        The mass, damping and stiffness matrices of the quadratic problem.
    n_eigs : int, optional
        Number of eigenvalues requested from the inner solver.
    eigenvectors : bool, optional
        If set, the inner solver also returns eigenvectors.
    status : dict
        Output dict; receives debug info and residual norms.
    conf : config object
        Uses ``conf.debug``, ``conf.method`` ('companion' or 'cholesky')
        and ``conf.mode`` ('normal' or inverted).
    """
    if conf.debug:
        # Record symmetry/hermiticity diagnostics of the input matrices.
        ssym = status['matrix_info'] = {}
        ssym['|M - M^T|'] = max_diff_csr(mtx_m, mtx_m.T)
        ssym['|D - D^T|'] = max_diff_csr(mtx_d, mtx_d.T)
        ssym['|K - K^T|'] = max_diff_csr(mtx_k, mtx_k.T)
        ssym['|M - M^H|'] = max_diff_csr(mtx_m, mtx_m.H)
        ssym['|D - D^H|'] = max_diff_csr(mtx_d, mtx_d.H)
        ssym['|K - K^H|'] = max_diff_csr(mtx_k, mtx_k.H)

    if conf.method == 'companion':
        # Companion linearization: A = [[D, K], [-I, 0]], B = [[-M, 0], [0, -I]].
        mtx_eye = -sps.eye(mtx_m.shape[0], dtype=mtx_m.dtype)

        mtx_a = sps.bmat([[mtx_d, mtx_k],
                          [mtx_eye, None]])
        mtx_b = sps.bmat([[-mtx_m, None],
                          [None, mtx_eye]])

    elif conf.method == 'cholesky':
        # Symmetry-preserving linearization using M = (P' L)(P' L)^T,
        # where P is the fill-reducing permutation of the factorization.
        from sksparse.cholmod import cholesky
        factor = cholesky(mtx_m)
        perm = factor.P()
        ir = nm.arange(len(perm))
        mtx_p = sps.coo_matrix((nm.ones_like(perm), (ir, perm)))
        mtx_l = mtx_p.T * factor.L()

        if conf.debug:
            ssym['|S - LL^T|'] = max_diff_csr(mtx_m, mtx_l * mtx_l.T)

        mtx_eye = sps.eye(mtx_l.shape[0], dtype=nm.float64)

        mtx_a = sps.bmat([[-mtx_k, None],
                          [None, mtx_eye]])
        mtx_b = sps.bmat([[mtx_d, mtx_l],
                          [mtx_l.T, None]])

    else:
        raise ValueError('unknown method! (%s)' % conf.method)

    if conf.debug:
        # Diagnostics of the linearized pair (A, B).
        ssym['|A - A^T|'] = max_diff_csr(mtx_a, mtx_a.T)
        ssym['|A - A^H|'] = max_diff_csr(mtx_a, mtx_a.H)
        ssym['|B - B^T|'] = max_diff_csr(mtx_b, mtx_b.T)
        ssym['|B - B^H|'] = max_diff_csr(mtx_b, mtx_b.H)

        for key, val in sorted(ssym.items()):
            output('{}: {}'.format(key, val))

    if conf.mode == 'normal':
        out = self.solver(mtx_a, mtx_b, n_eigs=n_eigs,
                          eigenvectors=eigenvectors, status=status)

        if eigenvectors:
            eigs, vecs = out
            # Keep only the first block of the linearized eigenvectors.
            out = (eigs, vecs[:mtx_m.shape[0], :])

            if conf.debug:
                res = mtx_a.dot(vecs) - eigs * mtx_b.dot(vecs)
                status['lin. error'] = nm.linalg.norm(res, nm.inf)

    else:
        # Inverted mode: solve B v = (1/e) A v, then invert the eigenvalues.
        out = self.solver(mtx_b, mtx_a, n_eigs=n_eigs,
                          eigenvectors=eigenvectors, status=status)

        if eigenvectors:
            eigs, vecs = out
            out = (1.0 / eigs, vecs[:mtx_m.shape[0], :])

            if conf.debug:
                res = (1.0 / eigs) * mtx_b.dot(vecs) - mtx_a.dot(vecs)
                status['lin. error'] = nm.linalg.norm(res, nm.inf)

        else:
            out = 1.0 / out

    if conf.debug and eigenvectors:
        # Residual of the original quadratic problem.
        eigs, vecs = out
        res = ((eigs**2 * (mtx_m.dot(vecs)))
               + (eigs * (mtx_d.dot(vecs)))
               + (mtx_k.dot(vecs)))
        status['error'] = nm.linalg.norm(res, nm.inf)

    return out
def __init__(self, A):
    """Cache a sparse Cholesky factorization of the scaling matrix ``A``."""
    self.A = A
    self.size = A.shape[0]
    self.factor = cholmod.cholesky(A)
    # Square roots of the diagonal D of the LDL' factorization.
    self.d_sqrt = np.sqrt(self.factor.D())
def test_writeability():
    """Arrays returned by a solve must own writable memory."""
    result = cholesky(sparse.eye(10, 10))(np.arange(10))
    assert result.flags["WRITEABLE"]
def test_CholmodNotPositiveDefiniteError():
    """Factoring a negative-definite matrix raises once L() is requested."""
    negdef = -sparse.eye(4).tocsc()
    factor = cholesky(negdef)
    assert_raises(CholmodNotPositiveDefiniteError, factor.L)
def test_cholesky_matrix_market():
    """Exercise the full Factor API on matrix-market least-squares problems.

    For each problem the normal-equations solution of X'X b = X'y is
    compared against np.linalg.lstsq, then the symbolic-analysis objects
    (analyze / analyze_AAt) are checked to raise before numeric
    factorization and to work after it, and finally every solve/apply
    method is verified against dense linear algebra.
    """
    for problem in ("well1033", "illc1033", "well1850", "illc1850"):
        X = mm_matrix(problem)
        y = mm_matrix(problem + "_rhs1")
        # Reference least-squares solution from dense LAPACK.
        answer = np.linalg.lstsq(X.todense(), y)[0]
        XtX = (X.T * X).tocsc()
        Xty = X.T * y
        for mode in ("auto", "simplicial", "supernodal"):
            # Normal-equations solves via cholesky / cholesky_AAt agree
            # with the dense reference.
            assert_allclose(cholesky(XtX, mode=mode)(Xty), answer)
            assert_allclose(cholesky_AAt(X.T, mode=mode)(Xty), answer)
            assert_allclose(cholesky(XtX, mode=mode).solve_A(Xty), answer)
            assert_allclose(cholesky_AAt(X.T, mode=mode).solve_A(Xty), answer)

            # A symbolic-only factor (analyze) can produce a numeric one...
            f1 = analyze(XtX, mode=mode)
            f2 = f1.cholesky(XtX)
            assert_allclose(f2(Xty), answer)
            # ...but must itself refuse all numeric operations until
            # a numeric factorization is done in-place:
            assert_raises(CholmodError, f1, Xty)
            assert_raises(CholmodError, f1.solve_A, Xty)
            assert_raises(CholmodError, f1.solve_LDLt, Xty)
            assert_raises(CholmodError, f1.solve_LD, Xty)
            assert_raises(CholmodError, f1.solve_DLt, Xty)
            assert_raises(CholmodError, f1.solve_L, Xty)
            assert_raises(CholmodError, f1.solve_D, Xty)
            assert_raises(CholmodError, f1.apply_P, Xty)
            assert_raises(CholmodError, f1.apply_Pt, Xty)
            # The permutation is available even before numeric work:
            f1.P()
            assert_raises(CholmodError, f1.L)
            assert_raises(CholmodError, f1.LD)
            assert_raises(CholmodError, f1.L_D)
            assert_raises(CholmodError, f1.L_D)
            f1.cholesky_inplace(XtX)
            assert_allclose(f1(Xty), answer)
            # Same dance for the AAt variant:
            f3 = analyze_AAt(X.T, mode=mode)
            f4 = f3.cholesky(XtX)
            assert_allclose(f4(Xty), answer)
            assert_raises(CholmodError, f3, Xty)
            f3.cholesky_AAt_inplace(X.T)
            assert_allclose(f3(Xty), answer)
            print(problem, mode)
            for f in (f1, f2, f3, f4):
                # Permuted XtX, i.e. P XtX P', which L L' must reproduce.
                pXtX = XtX.todense()[f.P()[:, np.newaxis],
                                     f.P()[np.newaxis, :]]
                assert_allclose(np.prod(f.D()), np.linalg.det(XtX.todense()))
                assert_allclose((f.L() * f.L().T).todense(), pXtX)
                L, D = f.L_D()
                assert_allclose((L * D * L.T).todense(), pXtX)
                b = np.arange(XtX.shape[0])[:, np.newaxis]
                # Every solve method matches the corresponding dense inverse:
                assert_allclose(f.solve_A(b), np.dot(XtX.todense().I, b))
                assert_allclose(f(b), np.dot(XtX.todense().I, b))
                assert_allclose(f.solve_LDLt(b),
                                np.dot((L * D * L.T).todense().I, b))
                assert_allclose(f.solve_LD(b), np.dot((L * D).todense().I, b))
                assert_allclose(f.solve_DLt(b),
                                np.dot((D * L.T).todense().I, b))
                assert_allclose(f.solve_L(b), np.dot(L.todense().I, b))
                assert_allclose(f.solve_Lt(b), np.dot(L.T.todense().I, b))
                assert_allclose(f.solve_D(b), np.dot(D.todense().I, b))
                assert_allclose(f.apply_P(b), b[f.P(), :])
                assert_allclose(f.apply_P(b), b[f.P(), :])
                # Pt is the inverse of P, and argsort inverts permutation
                # vectors:
                assert_allclose(f.apply_Pt(b), b[np.argsort(f.P()), :])
                assert_allclose(f.apply_Pt(b), b[np.argsort(f.P()), :])