def test_splu_solve(self):
        # Prefactorize (with UMFPACK) matrix for solving with multiple rhs
        a = self.a.astype('d')
        lu = um.splu(a)

        x1 = lu.solve(self.b)
        assert_allclose(a * x1, self.b)
        x2 = lu.solve(self.b2)
        assert_allclose(a * x2, self.b2)
    def test_splu_solve_int64(self):
        # Prefactorize (with UMFPACK) matrix with int64 indices for solving with
        # multiple rhs
        a = _to_int64(self.a.astype('d'))
        lu = um.splu(a)

        x1 = lu.solve(self.b)
        assert_allclose(a * x1, self.b)
        x2 = lu.solve(self.b2)
        assert_allclose(a * x2, self.b2)
    def lu_factorize(self):
        ## Redo the LU factorization
        self.B = self.A[:, self.idxB]
        self.etas = []
        self.eta_count = 0

        if LINEAR_SOLVER_TYPE == LinearSolver.SUPERLU:
            self.invB = splinalg.splu(self.B)
        else:
            self.invB = umfpack.splu(self.B)
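# Both branches above produce a factorization object with the same solve()
# interface, so callers of lu_factorize never need to know which backend was
# chosen. A minimal standalone sketch of that shared interface (the random
# 50x50 basis matrix below is an assumed example, not from the source):
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as splinalg

B = (sp.random(50, 50, density=0.1) + sp.eye(50)).tocsc()  # assumed example basis
invB = splinalg.splu(B)        # or umfpack.splu(B): same .solve() interface
x = invB.solve(np.ones(50))    # x = B^{-1} @ ones
assert np.allclose(B @ x, np.ones(50))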
    def test_splu_solve_sparse(self):
        # Prefactorize (with UMFPACK) matrix for solving with multiple rhs
        A = self.a.astype('d')
        lu = um.splu(A)

        b = csc_matrix(self.b.reshape(self.b.shape[0], 1))
        b2 = csc_matrix(self.b2.reshape(self.b2.shape[0], 1))
        B = hstack((b, b2))

        X = lu.solve_sparse(B)
        assert dense_norm(((A * X) - B).todense()) < 1e-14
        assert_allclose((A * X).todense(), B.todense())
def pagerank_umf(G, personal_vec):
    M = nx.to_scipy_sparse_matrix(G,
                                  nodelist=G.nodes(),
                                  weight='weight',
                                  dtype=float)
    S = scipy.array(M.sum(axis=1)).flatten()
    S[S != 0] = 1.0 / S[S != 0]
    Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
    M = Q * M

    alpha = 0.85
    A = scipy.sparse.eye(M.shape[0]) - alpha * M
    LU = umfpack.splu(A)
    ppr_mat = []
    for i in range(personal_vec.shape[1]):
        # Use the prefactorized LU object; calling spsolve here instead would
        # discard this result and refactorize A on every iteration.
        pr = LU.solve(personal_vec[:, i])
        ppr_mat.append(pr)

    return scipy.array(ppr_mat)
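# Hypothetical usage of pagerank_umf (the graph and seed vectors below are
# assumed examples, not from the source). Each column of the identity matrix
# is a single-seed personalization vector, and the LU factorization of
# (I - alpha*M) is reused across all of them.
import numpy as np
import networkx as nx

G = nx.karate_club_graph()
n = G.number_of_nodes()
personal_vec = np.eye(n)   # column i personalizes PageRank on node i
ppr = pagerank_umf(G, personal_vec)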
    def test_splu_lu(self):
        A = csc_matrix([[1, 2, 0, 4], [1, 0, 0, 1], [1, 0, 2, 1], [2, 2, 1, 0.]])

        lu = um.splu(A)

        Pr = np.zeros((4, 4))
        Pr[lu.perm_r, np.arange(4)] = 1
        Pr = csc_matrix(Pr)
        Pc = np.zeros((4, 4))
        Pc[np.arange(4), lu.perm_c] = 1
        Pc = csc_matrix(Pc)

        R = csc_matrix((4, 4))
        R.setdiag(lu.R)

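        # Reconstruct A from the scaled factorization: with the row scalings
        # R (diagonal lu.R) and permutation matrices Pr, Pc built above, the
        # factors satisfy A = R @ Pr.T @ (L @ U) @ Pc.T, verified below.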
        A2 = (R * Pr.T * (lu.L * lu.U) * Pc.T).A

        assert_allclose(A2, A.A, atol=1e-13)
    def Solve(self, A, b, reuse_factorisation=False):
        """Solves the linear system of equations"""

        if not issparse(A):
            raise ValueError("Linear system is not of sparse type")

        if A.shape == (0,0) and b.shape[0] == 0:
            warn("Empty linear system!!! Nothing to solve!!!")
            return np.copy(b)


        self.reuse_factorisation = reuse_factorisation
        if self.solver_type != "direct" and self.reuse_factorisation is True:
            warn("Re-using factorisation for non-direct solvers is not possible. The pre-conditioner is going to be reused instead")

        # DECIDE IF THE SOLVER TYPE IS APPROPRIATE FOR THE PROBLEM
        if self.switcher_message is False and self.dont_switch_solver is False:
            # PREFER PARDISO OR MUMPS OVER AMG IF AVAILABLE
            if self.has_pardiso:
                self.solver_type = "direct"
                self.solver_subtype = "pardiso"
            elif self.has_mumps:
                self.solver_type = "direct"
                self.solver_subtype = "mumps"
            elif b.shape[0] > 100000 and self.has_amg_solver:
                self.solver_type = "amg"
                self.solver_subtype = "gmres"
                print('Large system of equations. Switching to algebraic multigrid solver')
                self.switcher_message = True
            # elif mesh.points.shape[0]*MainData.nvar > 50000 and MainData.C < 4:
                # self.solver_type = "direct"
                # self.solver_subtype = "MUMPS"
                # print 'Large system of equations. Switching to MUMPS solver'
            elif b.shape[0] > 70000 and self.geometric_discretisation=="hex" and self.has_amg_solver:
                self.solver_type = "amg"
                self.solver_subtype = "gmres"
                print('Large system of equations. Switching to algebraic multigrid solver')
                self.switcher_message = True
            else:
                self.solver_type = "direct"
                self.solver_subtype = "umfpack"


        if self.solver_type == 'direct':
            # CALL DIRECT SOLVER
            if self.solver_subtype=='umfpack' and self.has_umfpack:
                if A.dtype != np.float64:
                    A = A.astype(np.float64)

                if self.solver_context_manager is None:
                    if self.reuse_factorisation is False:
                        sol = spsolve(A,b,permc_spec='MMD_AT_PLUS_A',use_umfpack=True)
                        # from scikits import umfpack
                        # sol = umfpack.spsolve(A, b)
                    else:
                        from scikits import umfpack
                        lu = umfpack.splu(A)
                        sol = lu.solve(b)
                        self.solver_context_manager = lu
                else:
                    sol = self.solver_context_manager.solve(b)


            elif self.solver_subtype=='mumps' and self.has_mumps:

                from mumps.mumps_context import MUMPSContext
                t_solve = time()
                A = A.tocoo()
                # False means non-symmetric - Do not change it to True. True means symmetric pos def
                # which is not the case for electromechanics
                if self.solver_context_manager is None:
                    context = MUMPSContext((A.shape[0], A.row, A.col, A.data, False), verbose=False)
                    context.analyze()
                    context.factorize()
                    sol = context.solve(rhs=b)

                    if self.reuse_factorisation:
                        self.solver_context_manager = context
                else:
                    sol = self.solver_context_manager.solve(rhs=b)

                print("MUMPS solver time is {}".format(time() - t_solve))

                return sol


            elif self.solver_subtype == "pardiso" and self.has_pardiso:
                # NOTE THAT THIS PARDISO SOLVER AUTOMATICALLY SAVES THE RIGHT FACTORISATION
                import pypardiso
                from pypardiso.scipy_aliases import pypardiso_solver as ps
                A = A.tocsr()
                t_solve = time()
                sol = pypardiso.spsolve(A,b)
                if self.reuse_factorisation is False:
                    ps.remove_stored_factorization()
                    ps.free_memory()
                print("Pardiso solver time is {}".format(time() - t_solve))


            else:
                # FOR 'super_lu'
                if A.dtype != np.float64:
                    A = A.astype(np.float64)
                A = A.tocsc()

                if self.solver_context_manager is None:
                    if self.reuse_factorisation is False:
                        sol = spsolve(A,b,permc_spec='MMD_AT_PLUS_A',use_umfpack=True)
                    else:
                        lu = splu(A)
                        sol = lu.solve(b)
                        self.solver_context_manager = lu
                else:
                    sol = self.solver_context_manager.solve(b)



        elif self.solver_type == "iterative":
            # CALL ITERATIVE SOLVER
            if self.solver_subtype == "gmres":
                sol = gmres(A,b,tol=self.iterative_solver_tolerance)[0]
            if self.solver_subtype == "lgmres":
                sol = lgmres(A,b,tol=self.iterative_solver_tolerance)[0]
            elif self.solver_subtype == "bicgstab":
                sol = bicgstab(A,b,tol=self.iterative_solver_tolerance)[0]
            else:
                sol = cg(A,b,tol=self.iterative_solver_tolerance)[0]

            # PRECONDITIONED ITERATIVE SOLVER - CHECK
            # P = spilu(A.tocsc(), drop_tol=1e-5)
            # M_x = lambda x: P.solve(x)
            # m = A.shape[1]
            # n = A.shape[0]
            # M = LinearOperator((n * m, n * m), M_x)
            # sol = cg(A, b, tol=self.iterative_solver_tolerance, M=M)[0]

        elif self.solver_type == "amg":
            if self.has_amg_solver is False:
                raise ImportError('Algebraic multigrid solver was not found. Please install it using "pip install pyamg"')
            from pyamg import ruge_stuben_solver, rootnode_solver, smoothed_aggregation_solver

            if A.dtype != b.dtype:
                # DOWN-CAST
                b = b.astype(A.dtype)

            if not isspmatrix_csr(A):
                A = A.tocsr()

            t_solve = time()

            if self.iterative_solver_tolerance > 1e-9:
                self.iterative_solver_tolerance = 1e-10

            # AMG METHOD
            amg_func = None
            if self.preconditioner_type=="smoothed_aggregation":
                # THIS IS TYPICALLY FASTER BUT THE TOLERANCE NEEDS TO BE SMALLER, TYPICALLY 1e-10
                amg_func = smoothed_aggregation_solver
            elif self.preconditioner_type == "ruge_stuben":
                amg_func = ruge_stuben_solver
            elif self.preconditioner_type == "rootnode":
                amg_func = rootnode_solver
            else:
                amg_func = rootnode_solver

            ml = amg_func(A)
            # ml = amg_func(A, smooth=('energy', {'degree':2}), strength='evolution' )
            # ml = amg_func(A, max_levels=3, diagonal_dominance=True)
            # ml = amg_func(A, coarse_solver=spsolve)
            # ml = amg_func(A, coarse_solver='cholesky')

            if self.solver_context_manager is None:
                # M = ml.aspreconditioner(cycle='V')
                M = ml.aspreconditioner()
                if self.reuse_factorisation:
                    self.solver_context_manager = M
            else:
                M = self.solver_context_manager

            # EXPLICIT CALL TO KRYLOV SOLVERS WITH AMG PRECONDITIONER
            # sol, info = bicgstab(A, b, M=M, tol=self.iterative_solver_tolerance)
            # sol, info = cg(A, b, M=M, tol=self.iterative_solver_tolerance)
            # sol, info = gmres(A, b, M=M, tol=self.iterative_solver_tolerance)

            # IMPLICIT CALL TO KRYLOV SOLVERS WITH AMG PRECONDITIONER
            residuals = []
            sol = ml.solve(b, tol=self.iterative_solver_tolerance, accel=self.solver_subtype, residuals=residuals)

            print("AMG solver time is {}".format(time() - t_solve))



        elif self.solver_type == "petsc" and self.has_petsc:
            if self.solver_subtype != "gmres" and self.solver_subtype != "minres" and self.solver_subtype != "cg":
                self.solver_subtype == "cg"
            if self.iterative_solver_tolerance < 1e-9:
                self.iterative_solver_tolerance = 1e-7

            from petsc4py import PETSc
            t_solve = time()
            pA = PETSc.Mat().createAIJ(size=A.shape, csr=(A.indptr, A.indices, A.data))
            pb = PETSc.Vec().createWithArray(b)

            ksp = PETSc.KSP()
            ksp.create(PETSc.COMM_WORLD)
            # ksp.create()
            ksp.setType(self.solver_subtype)
            ksp.setTolerances(atol=self.iterative_solver_tolerance,
                    rtol=self.iterative_solver_tolerance)
            # INCOMPLETE CHOLESKY (ICC) PRECONDITIONER
            ksp.getPC().setType('icc')

            # CREATE INITIAL GUESS
            psol = PETSc.Vec().createWithArray(np.ones(b.shape[0]))
            # SOLVE
            ksp.setOperators(pA)
            ksp.setFromOptions()
            ksp.solve(pb, psol)
            sol = psol.getArray()

            # print('Converged in', ksp.getIterationNumber(), 'iterations.')
            print("Petsc linear iterative solver time is {}".format(time() - t_solve))

        else:
            warn("{} solver is not available. Default solver is going to be used".format(self.solver_type))
            # FOR 'super_lu'
            if A.dtype != np.float64:
                A = A.astype(np.float64)
            A = A.tocsc()

            if self.solver_context_manager is None:
                if self.reuse_factorisation is False:
                    sol = spsolve(A,b,permc_spec='MMD_AT_PLUS_A',use_umfpack=True)
                else:
                    lu = splu(A)
                    sol = lu.solve(b)
                    self.solver_context_manager = lu
            else:
                sol = self.solver_context_manager.solve(b)


        return sol
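# Hypothetical usage of the reuse_factorisation path (the class name and
# constructor arguments are assumed, since they are not shown in this
# snippet): the first call stores the factorization, and later calls with
# the same matrix skip refactorization and only run the triangular solves.
#
#     solver = LinearSolver(linear_solver="direct")       # assumed constructor
#     x1 = solver.Solve(K, f1, reuse_factorisation=True)  # factorizes K once
#     x2 = solver.Solve(K, f2, reuse_factorisation=True)  # reuses stored LU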
def solve_sparse_mat_mat_lu(A, B, solver="petsc"):
    """
    Solves AX=B for X

    Parameters
    ----------

    A: scipy sparse matrix
        NxN Matrix
    B: scipy sparse matrix
        NxP Matrix
    solver: string
        Choice of direct solver. "petsc" or "scipy"

    Returns
    -------

    out: scipy sparse matrix
        solution X

    Notes
    -----

    Ignores zero columns in B, and hence is faster than the existing scipy implementation
    """

    sf = 1.e30  # scaling factor for petsc

    assert solver in ["petsc", "scipy"]

    if solver == "petsc":
        lu_A = get_petsc_ksp(A * sf, pctype="lu", ksptype="preonly", tol=1e-25, max_it=100)
    elif solver == "scipy":
        lu_A = splu(A)

    B = B.tocsc()  # Convert to csc to extract columns efficiently

    # Create a sparse output matrix by repeatedly applying
    # the sparse factorization to solve columns of b.
    # Adapted from scipy.sparse.linalg.dsolve.linsolve

    ind_of_nonzero_cols = np.unique(B.nonzero()[1])

    data_segs = []
    row_segs = []
    col_segs = []
    for j in ind_of_nonzero_cols:
        Bj = B[:, j].A.ravel()

        if solver == "scipy":
            xj = lu_A.solve(Bj)
        elif solver == "petsc":
            xj = petsc_solve_from_ksp(lu_A, Bj * sf, x=None, tol=1e-5)

        w = np.flatnonzero(xj)
        segment_length = w.shape[0]

        row_segs.append(w)
        col_segs.append(np.ones(segment_length, dtype=int) * j)
        data_segs.append(np.asarray(xj[w], dtype=A.dtype))

    sparse_data = np.concatenate(data_segs)
    sparse_row = np.concatenate(row_segs)
    sparse_col = np.concatenate(col_segs)
    x = csc_matrix((sparse_data, (sparse_row, sparse_col)), shape=B.shape, dtype=B.dtype)

    return x
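# Hypothetical usage with the scipy backend (the matrices below are assumed
# examples, not from the source). B carries an all-zero column, which the
# column-skipping loop above never has to solve for.
import numpy as np
from scipy.sparse import csc_matrix

A = csc_matrix(np.array([[4., 1., 0.],
                         [1., 3., 0.],
                         [0., 0., 2.]]))
B = csc_matrix(np.array([[1., 0., 0.],
                         [0., 0., 2.],
                         [0., 0., 1.]]))  # middle column is all zeros
X = solve_sparse_mat_mat_lu(A, B, solver="scipy")
assert np.allclose((A @ X - B).toarray(), 0.0, atol=1e-12)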
    def solve(self, solver="lu", x0=None, tol=1.e-6):
        solver = solver.lower()
        sf = 1e30
        pores = self.network.pores
        A = self.csr_solver_matrix
        self.solver_matrix.set_csr_singular_rows_to_dirichlet(A)
        self.A = A

        mass_residual = 1.0

        if x0 is not None:
            self.sol[:] = x0

        if solver == "lu":
            lu_A = splu(A)

        elif solver == "amg":
            ml = pyamg.rootnode_solver(A, max_coarse=10)

        elif solver == "petsc":
            comm = MPI.COMM_SELF
            ksp = get_petsc_ksp(A=A * sf, ksptype="minres", tol=tol, max_it=1000)
            petsc_rhs = PETSc.Vec().createWithArray(self.rhs * sf, comm=comm)

        elif "trilinos" in solver:
            epetra_mat = matrix_scipy_to_epetra(A * sf)
            epetra_rhs = vector_numpy_to_epetra(self.rhs * sf)

            if "ml" in solver:
                epetra_prec = trilinos_ml_prec(epetra_mat)

        def inner_loop_solve(tol):
            if solver == "lu":
                self.sol[:] = lu_A.solve(self.rhs)

            elif solver == "amg":
                self.sol[:] = ml.solve(b=self.rhs, x0=self.sol, tol=tol, accel='gmres')

            elif solver == "petsc":
                ksp.setTolerances(rtol=tol)
                ksp.setFromOptions()
                petsc_sol = PETSc.Vec().createWithArray(self.sol, comm=comm)
                ksp.setInitialGuessNonzero(True)
                ksp.solve(petsc_rhs, petsc_sol)

                self.sol[:] = petsc_sol.getArray()

            elif "trilinos" in solver:
                epetra_sol = vector_numpy_to_epetra(self.sol)

                if "ml" in solver:
                    x = trilinos_solve(epetra_mat, epetra_rhs, epetra_prec, x=epetra_sol, tol=tol)
                else:
                    x = solve_aztec(epetra_mat, epetra_rhs, x=epetra_sol, tol=tol)
                self.sol[:] = x

            pores.p_w[:] = self.sol[:]  # This side-effect is required for the compute_mass_residual function
            pores.p_n[:] = pores.p_w + pores.p_c

        count = 0
        while (mass_residual > 1e-5) and (count < 100):
            count += 1
            inner_loop_solve(tol)
            mass_residual = self.compute_mass_residual(A, self.rhs, self.sol)
            logger.debug("Mass flux residual %e", mass_residual)
            if count == 99:
                logger.warning("Failed to converge. Residual %e. Falling back to mltrilinos solver", mass_residual)
                return self.solve(solver="mltrilinos") # Fall back to reliable solver

            tol /= 10.0

        if "ml" in solver:
            epetra_prec.DestroyPreconditioner()

        return np.copy(self.sol)