Example #1
def gmrf_learn_cov_cholmod(
    R,
    U,
    rows,
    cols,
    edge_count,
    k,
    min_variance=1e-2,
    min_edge_count=10,
    num_iterations=50,
    psd_tolerance=1e-3,
    finish_early=True,
):
    n = len(R)
    m = len(U)
    mask = edge_count >= min_edge_count
    active_m = np.sum(mask)
    tic("m={0}, active m={1}".format(m, active_m), "gmrf_learn_cov_cholmod")
    active_U = U[mask]
    active_rows = rows[mask]
    active_cols = cols[mask]
    # A number of variables are independent (due to lack of observations).
    independent_mask = independent_variables(n, active_rows, active_cols)
    # Put them aside and use the independent strategy to solve them.
    indep_idxs = np.arange(n)[independent_mask]
    R_indep = R[indep_idxs]
    # Solve the regularized version for independent variables
    D_indep = 1.0 / np.maximum(min_variance * np.ones_like(R_indep), R_indep)
    # Putting together the dependent and independent parts
    D = np.zeros_like(R)
    D[independent_mask] = D_indep
    P = np.zeros_like(U)
    # No need to solve for the off-diagonal terms; they are all zero.
    # Solve for the dependent terms
    dependent_mask = ~independent_mask
    n_dep = np.sum(dependent_mask)
    if n_dep > 0:
        idxs_dep = np.arange(n)[dependent_mask]
        reverse_idxs_dep = np.zeros(n, dtype=np.int64)
        reverse_idxs_dep[dependent_mask] = np.arange(n_dep)
        rows_dep = reverse_idxs_dep[active_rows]
        cols_dep = reverse_idxs_dep[active_cols]
        R_dep = R[idxs_dep]
        U_dep = active_U
        (M, R_hat, U_hat) = normalized_problem(R_dep, U_dep, rows_dep, cols_dep)
        tic("Computing symbolic cholesky factorization of the graph...", "gmrf_learn_cov_cholmod")
        # Delayed import so that the rest of the code still runs without scikits.sparse installed.
        from scikits.sparse.cholmod import analyze

        Xs_dep = build_sparse(np.ones_like(R_hat), np.ones_like(U_hat), rows_dep, cols_dep)
        factor = analyze(Xs_dep)
        tic("Cholesky done", "gmrf_learn_cov_cholmod")
        # TODO add the other parameters
        (D_norm_dep, P_norm_dep) = covsel_cvx_cholmod(
            R_hat, U_hat, rows_dep, cols_dep, k, psd_tolerance, factor, num_iterations, finish_early
        )
        D[dependent_mask] = D_norm_dep / (M ** 2)
        P[mask] = P_norm_dep / (M[rows_dep] * M[cols_dep])
    return (D, P)
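
# A minimal sketch (not part of the original code) of what a `build_sparse`-style
# helper could look like, assuming the same (D, P, rows, cols) parametrization
# used above: D holds the diagonal of the precision matrix and P holds one value
# per undirected edge (rows[e], cols[e]), with rows[e] != cols[e]. The actual
# `build_sparse` called by `gmrf_learn_cov_cholmod` may differ.
import numpy as np
import scipy.sparse as sp


def build_sparse_sketch(D, P, rows, cols):
    n = len(D)
    diag_idx = np.arange(n)
    # Stack the diagonal entries plus both orientations of every edge so the
    # resulting precision matrix is symmetric.
    i = np.concatenate([diag_idx, rows, cols])
    j = np.concatenate([diag_idx, cols, rows])
    v = np.concatenate([D, P, P])
    return sp.coo_matrix((v, (i, j)), shape=(n, n)).tocsc()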
Example #2
    def _get_state_update(self):
        W, J, r = self._graph.get_linearization()

        Jt = J.T.tocsc()
        J = J.tocsc()

        # Decompose W such that W = U * U.T
        if self._sym_decomp_W is None:
            self._sym_decomp_W = analyze(W, mode='auto')

        chol_decomp_W = self._sym_decomp_W.cholesky(W)
        U = chol_decomp_W.L()
        JtU = Jt.dot(U)

        # A = J.T * W * J
        #   = J.T * U * U.T * J
        if self._sym_decomp_JtWJ is None:
            self._sym_decomp_JtWJ = analyze_AAt(JtU, mode='auto')

        chol_decomp_JtWJ = self._sym_decomp_JtWJ.cholesky_AAt(JtU)

        b = Jt.dot(W.dot(r))
        x = chol_decomp_JtWJ.solve_A(b)
        return x
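
# A minimal sketch (not part of the original code) of the reuse pattern shown
# above, assuming scikits.sparse is available (the same module is named
# sksparse.cholmod in newer scikit-sparse releases): `analyze` performs the
# symbolic step (fill-reducing ordering and elimination tree) once, and
# `Factor.cholesky` redoes only the numeric factorization whenever the values,
# but not the sparsity pattern, change. The 3x3 SPD matrix is toy data.
import numpy as np
import scipy.sparse as sp
from scikits.sparse.cholmod import analyze

A = sp.csc_matrix(np.array([[4.0, 1.0, 0.0],
                            [1.0, 3.0, 1.0],
                            [0.0, 1.0, 2.0]]))
b = np.ones(3)

symbolic = analyze(A)                      # symbolic factorization, done once
for scale in (1.0, 2.0, 4.0):
    factor = symbolic.cholesky(A * scale)  # numeric factorization only
    x = factor.solve_A(b)                  # solves (scale * A) x = b
    print(x)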
Example #3
def test_cholesky_matrix_market():
    for problem in ("well1033", "illc1033", "well1850", "illc1850"):
        X = mm_matrix(problem)
        y = mm_matrix(problem + "_rhs1")
        answer = np.linalg.lstsq(X.todense(), y)[0]
        XtX = (X.T * X).tocsc()
        Xty = X.T * y
        for mode in ("auto", "simplicial", "supernodal"):
            assert np.allclose(cholesky(XtX, mode=mode)(Xty), answer)
            assert np.allclose(cholesky_AAt(X.T, mode=mode)(Xty), answer)
            assert np.allclose(cholesky(XtX, mode=mode).solve_A(Xty), answer)
            assert np.allclose(
                cholesky_AAt(X.T, mode=mode).solve_A(Xty), answer)

            f1 = analyze(XtX, mode=mode)
            f2 = f1.cholesky(XtX)
            assert np.allclose(f2(Xty), answer)
            assert_raises(CholmodError, f1, Xty)
            assert_raises(CholmodError, f1.solve_A, Xty)
            assert_raises(CholmodError, f1.solve_LDLt, Xty)
            assert_raises(CholmodError, f1.solve_LD, Xty)
            assert_raises(CholmodError, f1.solve_DLt, Xty)
            assert_raises(CholmodError, f1.solve_L, Xty)
            assert_raises(CholmodError, f1.solve_D, Xty)
            assert_raises(CholmodError, f1.apply_P, Xty)
            assert_raises(CholmodError, f1.apply_Pt, Xty)
            f1.P()
            assert_raises(CholmodError, f1.L)
            assert_raises(CholmodError, f1.LD)
            assert_raises(CholmodError, f1.L_D)
            assert_raises(CholmodError, f1.L_D)
            f1.cholesky_inplace(XtX)
            assert np.allclose(f1(Xty), answer)

            f3 = analyze_AAt(X.T, mode=mode)
            f4 = f3.cholesky(XtX)
            assert np.allclose(f4(Xty), answer)
            assert_raises(CholmodError, f3, Xty)
            f3.cholesky_AAt_inplace(X.T)
            assert np.allclose(f3(Xty), answer)

            print(problem, mode)
            for f in (f1, f2, f3, f4):
                pXtX = XtX.todense()[f.P()[:, np.newaxis],
                                     f.P()[np.newaxis, :]]
                assert np.allclose(np.prod(f.D()),
                                   np.linalg.det(XtX.todense()))
                assert np.allclose((f.L() * f.L().T).todense(), pXtX)
                L, D = f.L_D()
                assert np.allclose((L * D * L.T).todense(), pXtX)

                b = np.arange(XtX.shape[0])[:, np.newaxis]
                assert np.allclose(f.solve_A(b), np.dot(XtX.todense().I, b))
                assert np.allclose(f(b), np.dot(XtX.todense().I, b))
                assert np.allclose(f.solve_LDLt(b),
                                   np.dot((L * D * L.T).todense().I, b))
                assert np.allclose(f.solve_LD(b), np.dot((L * D).todense().I,
                                                         b))
                assert np.allclose(f.solve_DLt(b),
                                   np.dot((D * L.T).todense().I, b))
                assert np.allclose(f.solve_L(b), np.dot(L.todense().I, b))
                assert np.allclose(f.solve_Lt(b), np.dot(L.T.todense().I, b))
                assert np.allclose(f.solve_D(b), np.dot(D.todense().I, b))

                assert np.allclose(f.apply_P(b), b[f.P(), :])
                assert np.allclose(f.solve_P(b), b[f.P(), :])
                # Pt is the inverse of P, and argsort inverts permutation
                # vectors:
                assert np.allclose(f.apply_Pt(b), b[np.argsort(f.P()), :])
                assert np.allclose(f.solve_Pt(b), b[np.argsort(f.P()), :])
Example #4
def pattern_to_products(pattern):
    """Takes a sparse matrix with the correct sparsity pattern, but not necessarily meaningful values, and returns the symbolic Cholesky factorization of it, computed by CHOLMOD via scikits.sparse. The symbolic factorization is stored in a Factor object. Its method P can be called to obtain the permutation vector. I don't know if there's any way to get the actual sparsity pattern out, but it can surely be done.
    
    The return value is stored in a singleton dictionary with one key, 'symbolic'. It is stored in a dictionary to make it possible to have a uniform return type across all backends."""
    return {'symbolic': cholmod.analyze(pattern)}
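
# A hypothetical usage sketch (not part of the original code), assuming
# `cholmod` above refers to `scikits.sparse.cholmod` and that a matrix with the
# same pattern but meaningful values is factored numerically afterwards. The
# toy 3x3 matrices are made up for illustration.
import numpy as np
import scipy.sparse as sp

pattern = sp.csc_matrix(np.array([[1.0, 1.0, 0.0],
                                  [1.0, 1.0, 1.0],
                                  [0.0, 1.0, 1.0]]))
products = pattern_to_products(pattern)
symbolic = products['symbolic']
perm = symbolic.P()                        # fill-reducing permutation vector

A = sp.csc_matrix(np.array([[4.0, 1.0, 0.0],
                            [1.0, 3.0, 1.0],
                            [0.0, 1.0, 2.0]]))  # same pattern, real values
factor = symbolic.cholesky(A)
x = factor.solve_A(np.ones(3))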
Example #5
#Q = random_projection_cholmod(D, U, rows, cols, k, factor)
Q = random_projection_cholmod_csc(Xs, k=1000)
A = Q.T
print(A.shape)
R = np.sum(A * A, axis=1)
U = np.sum(A[rows] * A[cols], axis=1)
R_ = W.diagonal()
U_ = W[rows, cols]

#X = build_sparse(D, P, rows, cols)
#(eis,_)=eigsh(X, k=1, sigma=-1, which='LM')
#ei = eis[0]

#is_psd_dense(R, U, rows, cols)
#is_psd_cholmod(R, U, rows, cols)

(R, U) = inv_dense(D, P, rows, cols)

W = la.inv(X)

(D1, P1) = covsel_quick(R, U, rows, cols)

(D2, P2) = covsel_cvx_dense(R, U, rows, cols, num_iterations=150)
X2 = build_dense(D2, P2, rows, cols)

factor = analyze(Xs)
(D3, P3) = covsel_cvx_cholmod(R, U, rows, cols,
                              k=10000, num_iterations=100,
                              factor=factor, finish_early=False)