Code Example #1
File: LaplacianTrilinos.py  Project: wathen/Parallel
def edgeAMG(Anode,Acurl,D):
    nodalAMG = smoothed_aggregation_solver(Anode,max_coarse=10,keep=True)


    ##
    # construct multilevel structure
    levels = []
    levels.append( multilevel_solver.level() )
    levels[-1].A = Acurl
    levels[-1].D = D
    for i in range(1,len(nodalAMG.levels)):
        A = levels[-1].A
        Pnode = nodalAMG.levels[i-1].AggOp
        P = findPEdge(D, Pnode)
        R = P.T
        levels[-1].P = P
        levels[-1].R = R
        levels.append( multilevel_solver.level() )
        A = R*A*P
        D = csr_matrix(dia_matrix((1.0/((P.T*P).diagonal()),0),shape=(P.shape[1],P.shape[1]))*(P.T*D*Pnode))
        levels[-1].A = A
        levels[-1].D = D

    edgeML = multilevel_solver(levels)
    for i in range(0,len(edgeML.levels)):
        edgeML.levels[i].presmoother = setup_hiptmair(levels[i])
        edgeML.levels[i].postsmoother = setup_hiptmair(levels[i])
    return edgeML
Code Example #2
File: edgeAMG.py  Project: sdalton1/pyamg-examples
def edgeAMG(Anode, Acurl, D):
    nodalAMG = smoothed_aggregation_solver(Anode, max_coarse=10, keep=True)

    ##
    # construct multilevel structure
    levels = []
    levels.append(multilevel_solver.level())
    levels[-1].A = Acurl
    levels[-1].D = D
    for i in range(1, len(nodalAMG.levels)):
        A = levels[-1].A
        Pnode = nodalAMG.levels[i - 1].AggOp
        P = findPEdge(D, Pnode)
        R = P.T
        levels[-1].P = P
        levels[-1].R = R
        levels.append(multilevel_solver.level())
        A = R * A * P
        D = csr_matrix(
            dia_matrix((1.0 / ((P.T * P).diagonal()), 0),
                       shape=(P.shape[1], P.shape[1])) * (P.T * D * Pnode))
        levels[-1].A = A
        levels[-1].D = D

    edgeML = multilevel_solver(levels)
    for i in range(0, len(edgeML.levels)):
        edgeML.levels[i].presmoother = setup_hiptmair(levels[i])
        edgeML.levels[i].postsmoother = setup_hiptmair(levels[i])
    return edgeML
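
Both edgeAMG listings above assume the surrounding imports from their source files. A minimal sketch of those imports, assuming the older pyamg API that exposes multilevel_solver (recent releases rename it MultilevelSolver), might look like this; findPEdge and setup_hiptmair are helper functions defined elsewhere in the same files.

# Sketch of the imports assumed by the edgeAMG listings (older pyamg API).
from scipy.sparse import csr_matrix, dia_matrix
from pyamg.aggregation import smoothed_aggregation_solver
from pyamg.multilevel import multilevel_solver
# findPEdge and setup_hiptmair are defined elsewhere in edgeAMG.py.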
Code Example #3
File: test_adaptive.py  Project: jontateixeira/pyamg
    def test_elasticity(self):
        A, B = linear_elasticity((35, 35), format='bsr')

        smoother = ('gauss_seidel', {'sweep': 'symmetric', 'iterations': 2})
        [asa, work] = adaptive_sa_solver(A,
                                         num_candidates=3,
                                         improvement_iters=5,
                                         prepostsmoother=smoother)
        sa = smoothed_aggregation_solver(A, B=B)

        b = sp.rand(A.shape[0])

        residuals0 = []
        residuals1 = []

        sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
        sol1 = sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)
        del sol0, sol1

        conv_asa = (residuals0[-1] / residuals0[0])**(1.0 / len(residuals0))
        conv_sa = (residuals1[-1] / residuals1[0])**(1.0 / len(residuals1))

        # print "ASA convergence (Elasticity) %1.2e" % (conv_asa)
        # print "SA convergence (Elasticity) %1.2e" % (conv_sa)
        assert (conv_asa < 1.3 * conv_sa)
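
A sketch of the imports this test body relies on (assumed from the surrounding test module; sp refers to scipy, whose sp.rand is deprecated in recent releases in favor of numpy.random.rand):

# Sketch of imports assumed by the adaptive-SA elasticity test above.
import scipy as sp
from pyamg.gallery import linear_elasticity
from pyamg.aggregation import smoothed_aggregation_solver
from pyamg.aggregation.adaptive import adaptive_sa_solver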
Code Example #4
def test(model, writer, device):
    """Test loop for whole test data set."""
    model.eval()
    _, _, test_loader = init_loaders()
    data = np.zeros((len(test_loader), 5, 4))
    for idx, (features, coors, shape, l_matrix) in enumerate(test_loader):
        print('Evaluating ' + str(idx) + ' out of ' + str(len(test_loader)))
        l_matrix = csr_matrix(l_matrix[0].to_dense().numpy(), dtype=np.float32)
        rhs = np.random.randn(shape[0])

        # Vanilla conjugate gradients without preconditioner as baseline.
        data[idx, 0] = evaluate('vanilla', l_matrix, rhs, eye(shape[0]))

        # Jacobi preconditioner.
        data[idx, 1] = evaluate('jacobi', l_matrix, rhs,
                                diags(1. / l_matrix.diagonal()))

        # Incomplete Cholesky preconditioner.
        lu = sla.spilu(l_matrix.tocsc(), fill_factor=1., drop_tol=0.)
        L = lu.L
        D = diags(lu.U.diagonal())  # https://is.gd/5PJcTp
        Pr = np.zeros(l_matrix.shape)
        Pc = np.zeros(l_matrix.shape)
        Pr[lu.perm_r, np.arange(l_matrix.shape[0])] = 1
        Pc[np.arange(l_matrix.shape[0]), lu.perm_c] = 1
        Pr = lil_matrix(Pr)
        Pc = lil_matrix(Pc)
        preconditioner = sla.inv((Pr.T * (L * D * L.T) * Pc.T).tocsc())
        data[idx, 2] = evaluate('ic(0)', l_matrix, rhs, preconditioner)

        # Algebraic MultiGrid preconditioner.
        preconditioner = smoothed_aggregation_solver(
            l_matrix).aspreconditioner(cycle='V')
        preconditioner = csr_matrix(
            preconditioner.matmat(np.eye(shape[0], dtype=np.float32)))
        data[idx, 3] = evaluate('amg', l_matrix, rhs, preconditioner)

        # Learned preconditioner.
        sp_tensor = SparseConvTensor(features.T.to(device),
                                     coors.int().squeeze(), shape, 1)
        preconditioner = csr_matrix(model(sp_tensor).detach().cpu().numpy())
        data[idx, 4] = evaluate('learned', l_matrix, rhs, preconditioner)

    np.savetxt('./tmp/time.csv', data[:, :, 0], fmt='%.4f')
    np.savetxt('./tmp/iterations.csv', data[:, :, 1], fmt='%.4f')
    np.savetxt('./tmp/condition.csv', data[:, :, 2], fmt='%.4f')

    for m in range(1, 5):
        # Compare time/iterations/condition to baseline CG.
        writer.add_histogram('test/time', data[:, 0, 0] / data[:, m, 0], m)
        writer.add_histogram('test/iterations', data[:, 0, 1] / data[:, m, 1],
                             m)
        writer.add_histogram('test/condition', data[:, 0, 2] / data[:, m, 2],
                             m)
        writer.add_histogram('test/density', data[:, m, 3], m)
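
The AMG step above densifies the preconditioner so that evaluate() can inspect it; when only the solve matters, the multilevel hierarchy can be handed to CG directly as a LinearOperator. A minimal sketch, using pyamg's gallery Poisson matrix as a stand-in for l_matrix:

# Sketch: use the AMG hierarchy as a preconditioner without densifying it.
import numpy as np
from scipy.sparse.linalg import cg
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson

A = poisson((100, 100), format='csr')   # stand-in for l_matrix
b = np.random.randn(A.shape[0])
M = smoothed_aggregation_solver(A).aspreconditioner(cycle='V')
x, info = cg(A, b, M=M)                 # info == 0 on convergence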
Code Example #5
def demo():
    """Outline basic demo."""
    A = poisson((100, 100), format='csr')  # 2D FD Poisson problem
    B = None  # no near-null-space guesses for SA
    b = sp.rand(A.shape[0], 1)  # a random right-hand side

    # use AMG based on Smoothed Aggregation (SA) and display info
    mls = smoothed_aggregation_solver(A, B=B)
    print(mls)

    # Solve Ax=b with no acceleration ('standalone' solver)
    standalone_residuals = []
    x = mls.solve(b, tol=1e-10, accel=None, residuals=standalone_residuals)

    # Solve Ax=b with Conjugate Gradient (AMG as a preconditioner to CG)
    accelerated_residuals = []
    x = mls.solve(b, tol=1e-10, accel='cg', residuals=accelerated_residuals)
    del x

    # Compute relative residuals
    standalone_residuals = \
        np.array(standalone_residuals) / standalone_residuals[0]
    accelerated_residuals = \
        np.array(accelerated_residuals) / accelerated_residuals[0]

    # Compute (geometric) convergence factors
    factor1 = standalone_residuals[-1]**(1.0 / len(standalone_residuals))
    factor2 = accelerated_residuals[-1]**(1.0 / len(accelerated_residuals))

    print("                     MG convergence factor: %g" % (factor1))
    print("MG with CG acceleration convergence factor: %g" % (factor2))

    # Plot convergence history
    try:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.title('Convergence History')
        plt.xlabel('Iteration')
        plt.ylabel('Relative Residual')
        plt.semilogy(standalone_residuals,
                     label='Standalone',
                     linestyle='-',
                     marker='o')
        plt.semilogy(accelerated_residuals,
                     label='Accelerated',
                     linestyle='-',
                     marker='s')
        plt.legend()
        plt.show()
    except ImportError:
        print("\n\nNote: pylab not available on your system.")
Code Example #6
File: demo.py  Project: pyamg/pyamg
def demo():
    """Outline basic demo."""
    A = poisson((100, 100), format='csr')  # 2D FD Poisson problem
    B = None                               # no near-null-space guesses for SA
    b = sp.rand(A.shape[0], 1)          # a random right-hand side

    # use AMG based on Smoothed Aggregation (SA) and display info
    mls = smoothed_aggregation_solver(A, B=B)
    print(mls)

    # Solve Ax=b with no acceleration ('standalone' solver)
    standalone_residuals = []
    x = mls.solve(b, tol=1e-10, accel=None, residuals=standalone_residuals)

    # Solve Ax=b with Conjugate Gradient (AMG as a preconditioner to CG)
    accelerated_residuals = []
    x = mls.solve(b, tol=1e-10, accel='cg', residuals=accelerated_residuals)
    del x

    # Compute relative residuals
    standalone_residuals = \
        np.array(standalone_residuals) / standalone_residuals[0]
    accelerated_residuals = \
        np.array(accelerated_residuals) / accelerated_residuals[0]

    # Compute (geometric) convergence factors
    factor1 = standalone_residuals[-1]**(1.0/len(standalone_residuals))
    factor2 = accelerated_residuals[-1]**(1.0/len(accelerated_residuals))

    print("                     MG convergence factor: %g" % (factor1))
    print("MG with CG acceleration convergence factor: %g" % (factor2))

    # Plot convergence history
    try:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.title('Convergence History')
        plt.xlabel('Iteration')
        plt.ylabel('Relative Residual')
        plt.semilogy(standalone_residuals, label='Standalone',
                     linestyle='-', marker='o')
        plt.semilogy(accelerated_residuals, label='Accelerated',
                     linestyle='-', marker='s')
        plt.legend()
        plt.show()
    except ImportError:
        print("\n\nNote: pylab not available on your system.")
Code Example #7
File: test_adaptive.py  Project: gaussWu/pyamg
    def test_poisson(self):
        A = poisson( (50,50), format='csr' )

        [asa,work] = adaptive_sa_solver(A, num_candidates = 1)
        sa  = smoothed_aggregation_solver(A, B = ones((A.shape[0],1)) )

        b = rand(A.shape[0])

        residuals0 = []
        residuals1 = []

        sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
        sol1 =  sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)
       
        conv_asa = (residuals0[-1]/residuals0[0])**(1.0/len(residuals0))
        conv_sa  = (residuals1[-1]/residuals1[0])**(1.0/len(residuals1))
        
        #print "ASA convergence (Poisson)",conv_asa
        #print "SA convergence (Poisson)",conv_sa
        assert( conv_asa < 1.2 * conv_sa )
Code Example #8
File: test_adaptive.py  Project: gaussWu/pyamg
    def test_poisson(self):
        A = poisson((50, 50), format='csr')

        [asa, work] = adaptive_sa_solver(A, num_candidates=1)
        sa = smoothed_aggregation_solver(A, B=ones((A.shape[0], 1)))

        b = rand(A.shape[0])

        residuals0 = []
        residuals1 = []

        sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
        sol1 = sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)

        conv_asa = (residuals0[-1] / residuals0[0])**(1.0 / len(residuals0))
        conv_sa = (residuals1[-1] / residuals1[0])**(1.0 / len(residuals1))

        #print "ASA convergence (Poisson)",conv_asa
        #print "SA convergence (Poisson)",conv_sa
        assert (conv_asa < 1.2 * conv_sa)
Code Example #9
File: test_adaptive.py  Project: gaussWu/pyamg
    def test_elasticity(self):
        A,B = linear_elasticity( (35,35), format='bsr' )

        [asa,work] = adaptive_sa_solver(A, num_candidates = 3, \
                improvement_iters=5,prepostsmoother=('gauss_seidel',{'sweep':'symmetric','iterations':2}))
        sa  = smoothed_aggregation_solver(A, B=B )

        b = rand(A.shape[0])

        residuals0 = []
        residuals1 = []

        sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
        sol1 =  sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)
       
        conv_asa = (residuals0[-1]/residuals0[0])**(1.0/len(residuals0))
        conv_sa  = (residuals1[-1]/residuals1[0])**(1.0/len(residuals1))
       
        #print "ASA convergence (Elasticity) %1.2e" % (conv_asa)
        #print "SA convergence (Elasticity) %1.2e" % (conv_sa)
        assert( conv_asa < 1.3 * conv_sa ) 
Code Example #10
File: test_smooth.py  Project: pyamg/pyamg
    def test_range(self):
        """Check that P*R=B"""
        np.random.seed(0)  # make tests repeatable

        cases = []

        # Simple, real-valued diffusion problems
        X = load_example("airfoil")
        A = X["A"].tocsr()
        B = X["B"]
        cases.append((A, B, ("jacobi", {"filter": True, "weighting": "local"})))
        cases.append((A, B, ("jacobi", {"filter": True, "weighting": "block"})))

        cases.append((A, B, ("energy", {"maxiter": 3})))
        cases.append((A, B, ("energy", {"krylov": "cgnr"})))
        cases.append((A, B, ("energy", {"krylov": "gmres", "degree": 2})))

        A = poisson((10, 10), format="csr")
        B = np.ones((A.shape[0], 1))
        cases.append((A, B, ("jacobi", {"filter": True, "weighting": "diagonal"})))
        cases.append((A, B, ("jacobi", {"filter": True, "weighting": "local"})))

        cases.append((A, B, "energy"))
        cases.append((A, B, ("energy", {"degree": 2})))
        cases.append((A, B, ("energy", {"krylov": "cgnr", "degree": 2})))
        cases.append((A, B, ("energy", {"krylov": "gmres"})))

        # Simple, imaginary-valued problems
        iA = 1.0j * A
        iB = 1.0 + np.random.rand(iA.shape[0], 2) + 1.0j * (1.0 + np.random.rand(iA.shape[0], 2))

        cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "diagonal"})))
        cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "block"})))
        cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "local"})))
        cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "block"})))

        cases.append((iA.tobsr(blocksize=(5, 5)), B, ("jacobi", {"filter": True, "weighting": "block"})))
        cases.append((iA.tobsr(blocksize=(5, 5)), iB, ("jacobi", {"filter": True, "weighting": "block"})))

        cases.append((iA, B, ("energy", {"krylov": "cgnr", "degree": 2})))
        cases.append((iA, iB, ("energy", {"krylov": "cgnr"})))
        cases.append(
            (
                iA.tobsr(blocksize=(5, 5)),
                B,
                ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
            )
        )
        cases.append(
            (
                iA.tobsr(blocksize=(5, 5)),
                B,
                ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
            )
        )
        cases.append((iA.tobsr(blocksize=(5, 5)), B, ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3})))
        cases.append((iA.tobsr(blocksize=(5, 5)), iB, ("energy", {"krylov": "cgnr"})))

        cases.append((iA, B, ("energy", {"krylov": "gmres"})))
        cases.append((iA, iB, ("energy", {"krylov": "gmres", "degree": 2})))
        cases.append((iA.tobsr(blocksize=(5, 5)), B, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3})))
        cases.append((iA.tobsr(blocksize=(5, 5)), iB, ("energy", {"krylov": "gmres"})))

        # Simple, imaginary-valued problems
        iA = A + 1.0j * scipy.sparse.eye(A.shape[0], A.shape[1])

        cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "local"})))
        cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "block"})))
        cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "diagonal"})))
        cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "block"})))
        cases.append((iA.tobsr(blocksize=(4, 4)), iB, ("jacobi", {"filter": True, "weighting": "block"})))

        cases.append((iA, B, ("energy", {"krylov": "cgnr"})))
        cases.append((iA.tobsr(blocksize=(4, 4)), iB, ("energy", {"krylov": "cgnr"})))

        cases.append((iA, B, ("energy", {"krylov": "gmres"})))
        cases.append((iA.tobsr(blocksize=(4, 4)), iB, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3})))

        cases.append(
            (
                iA.tobsr(blocksize=(4, 4)),
                iB,
                ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
            )
        )
        cases.append(
            (
                iA.tobsr(blocksize=(4, 4)),
                iB,
                ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
            )
        )

        A = gauge_laplacian(10, spacing=1.0, beta=0.21)
        B = np.ones((A.shape[0], 1))
        cases.append((A, iB, ("jacobi", {"filter": True, "weighting": "diagonal"})))
        cases.append((A, iB, ("jacobi", {"filter": True, "weighting": "local"})))

        cases.append((A, B, ("energy", {"krylov": "cg"})))
        cases.append((A, iB, ("energy", {"krylov": "cgnr"})))
        cases.append((A, iB, ("energy", {"krylov": "gmres"})))

        cases.append(
            (
                A.tobsr(blocksize=(2, 2)),
                B,
                ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
            )
        )
        cases.append(
            (
                A.tobsr(blocksize=(2, 2)),
                B,
                ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
            )
        )
        cases.append((A.tobsr(blocksize=(2, 2)), B, ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3})))
        cases.append((A.tobsr(blocksize=(2, 2)), iB, ("energy", {"krylov": "cg"})))
        cases.append((A.tobsr(blocksize=(2, 2)), B, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3})))

        cases.append(
            (
                A.tobsr(blocksize=(2, 2)),
                B,
                ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
            )
        )
        cases.append(
            (
                A.tobsr(blocksize=(2, 2)),
                B,
                ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
            )
        )

        # Linear elasticity problems
        A, B = linear_elasticity((10, 10))
        cases.append((A, B, ("jacobi", {"filter": True, "weighting": "diagonal"})))
        cases.append((A, B, ("jacobi", {"filter": True, "weighting": "local"})))
        cases.append((A, B, ("jacobi", {"filter": True, "weighting": "block"})))

        cases.append((A, B, ("energy", {"degree": 2})))
        cases.append((A, B, ("energy", {"degree": 3, "postfilter": {"theta": 0.05}})))
        cases.append((A, B, ("energy", {"degree": 3, "prefilter": {"theta": 0.05}})))
        cases.append((A, B, ("energy", {"krylov": "cgnr"})))
        cases.append((A, B, ("energy", {"krylov": "gmres", "degree": 2})))

        # Classic SA cases
        for A, B, smooth in cases:
            ml = smoothed_aggregation_solver(A, B=B, max_coarse=1, max_levels=2, smooth=smooth)
            P = ml.levels[0].P
            B = ml.levels[0].B
            R = ml.levels[1].B
            assert_almost_equal(P * R, B)

        def blocksize(A):
            # Helper Function: return the blocksize of a matrix
            if isspmatrix_bsr(A):
                return A.blocksize[0]
            else:
                return 1

        # Root-node cases
        counter = 0
        for A, B, smooth in cases:
            counter += 1

            if isinstance(smooth, tuple):
                smoother = smooth[0]
            else:
                smoother = smooth

            if smoother == "energy" and (B.shape[1] >= blocksize(A)):
                ic = [("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 4}), None]
                ml = rootnode_solver(
                    A,
                    B=B,
                    max_coarse=1,
                    max_levels=2,
                    smooth=smooth,
                    improve_candidates=ic,
                    keep=True,
                    symmetry="nonsymmetric",
                )
                T = ml.levels[0].T.tocsr()
                Cpts = ml.levels[0].Cpts
                Bf = ml.levels[0].B
                Bf_H = ml.levels[0].BH
                Bc = ml.levels[1].B
                P = ml.levels[0].P.tocsr()

                # P should preserve B in its range, wherever P
                # has enough nonzeros
                mask = (P.indptr[1:] - P.indptr[:-1]) >= B.shape[1]
                assert_almost_equal((P * Bc)[mask, :], Bf[mask, :])
                assert_almost_equal((P * Bc)[mask, :], Bf_H[mask, :])

                # P should be the identity at Cpts
                I1 = eye(T.shape[1], T.shape[1], format="csr", dtype=T.dtype)
                I2 = P[Cpts, :]
                assert_almost_equal(I1.data, I2.data)
                assert_equal(I1.indptr, I2.indptr)
                assert_equal(I1.indices, I2.indices)

                # T should be the identity at Cpts
                I2 = T[Cpts, :]
                assert_almost_equal(I1.data, I2.data)
                assert_equal(I1.indptr, I2.indptr)
                assert_equal(I1.indices, I2.indices)
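
The classic-SA branch of this test can be reproduced standalone; a minimal sketch for a small Poisson problem with energy prolongation smoothing (one of the cases above), assuming the same pyamg API:

# Sketch: verify that the SA prolongation reproduces the candidate B.
import numpy as np
from numpy.testing import assert_almost_equal
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson

A = poisson((10, 10), format='csr')
B = np.ones((A.shape[0], 1))
ml = smoothed_aggregation_solver(A, B=B, max_coarse=1, max_levels=2,
                                 smooth='energy')
P = ml.levels[0].P                      # prolongation from level 1 to level 0
assert_almost_equal(P * ml.levels[1].B, ml.levels[0].B)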
Code Example #11
File: edgeAMG.py  Project: sdalton1/pyamg-examples
    PEdge = csr_matrix((data, (row, col)), shape=(numEdges, numCoarseEdges))
    return PEdge


if __name__ == '__main__':

    Acurl = csr_matrix(mmread("HCurlStiffness.dat"))
    Anode = csr_matrix(mmread("H1Stiffness.dat"))
    D = csr_matrix(mmread("D.dat"))

    ml = edgeAMG(Anode, Acurl, D)
    MLOp = ml.aspreconditioner()
    x = numpy.random.rand(Acurl.shape[1], 1)
    b = Acurl * x
    x0 = numpy.ones((Acurl.shape[1], 1))

    r_edgeAMG = []
    r_None = []
    r_SA = []

    ml_SA = smoothed_aggregation_solver(Acurl)
    ML_SAOP = ml_SA.aspreconditioner()
    x_prec, info = cg(Acurl, b, x0, M=MLOp, tol=1e-8, residuals=r_edgeAMG)
    x_prec, info = cg(Acurl, b, x0, M=None, tol=1e-8, residuals=r_None)
    x_prec, info = cg(Acurl, b, x0, M=ML_SAOP, tol=1e-8, residuals=r_SA)

    import pylab
    pylab.semilogy(range(0, len(r_edgeAMG)), r_edgeAMG, range(0, len(r_None)),
                   r_None, range(0, len(r_SA)), r_SA)
    pylab.show()
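
This driver relies on pyamg's own Krylov wrapper, whose cg accepts a residuals list (SciPy's does not), plus Matrix Market readers; a sketch of the imports it assumes:

# Sketch of imports assumed by the edgeAMG driver above.
import numpy
import pylab
from scipy.io import mmread
from scipy.sparse import csr_matrix
from pyamg.aggregation import smoothed_aggregation_solver
from pyamg.krylov import cg             # pyamg's cg takes residuals=...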
Code Example #12
File: test_solver_pyamg.py  Project: ratnania/pigasus
PDE.assembly()
PDE.solve()

# getting scipy matrix
A_scipy = PDE.system.get()

b = np.ones(PDE.size)

# ----------------------------------------------
import scipy
from pyamg.aggregation import smoothed_aggregation_solver
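# Note: `time`, `tol_pyamg`, and `maxiter_pyamg` are defined earlier in the
# original script; hypothetical stand-ins are given here for this excerpt.
import time
tol_pyamg = 1e-10       # assumed tolerance (hypothetical value)
maxiter_pyamg = 5000    # assumed iteration cap (hypothetical value)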

B = None  # no near-null-space guesses for SA

# Construct solver using AMG based on Smoothed Aggregation (SA) and display info
mls = smoothed_aggregation_solver(A_scipy, B=B)

# Solve Ax=b with no acceleration ('standalone' solver)
print "Using pyamg-standalone"
standalone_residuals = []
t_start = time.time()
x = mls.solve(b,
              tol=tol_pyamg,
              accel=None,
              maxiter=maxiter_pyamg,
              residuals=standalone_residuals)
t_end = time.time()
mls_elapsed = t_end - t_start
mls_err = standalone_residuals[-1]
mls_niter = len(standalone_residuals)
print "done."
Code Example #13
    def test_range(self):
        """Check that P*R=B"""
        numpy.random.seed(0) #make tests repeatable
        
        cases = []

        ##
        # Simple, real-valued diffusion problems
        X = load_example('airfoil')
        A = X['A'].tocsr(); B = X['B']
        cases.append((A,B,('jacobi', {'filter' : True, 'weighting' : 'local'}) ))
        cases.append((A,B,('jacobi', {'filter' : True, 'weighting' : 'block'}) ))

        cases.append((A,B,('energy', {'maxiter' : 3}) ))
        cases.append((A,B,('energy', {'krylov' : 'cgnr'}) ))
        cases.append((A,B,('energy', {'krylov' : 'gmres', 'degree' : 2}) ))
        
        A = poisson((10,10), format='csr')
        B = ones((A.shape[0],1))
        cases.append((A,B,('jacobi', {'filter' : True, 'weighting' : 'diagonal'}) ))
        cases.append((A,B,('jacobi', {'filter' : True, 'weighting' : 'local'}) ))

        cases.append((A,B,'energy'))
        cases.append((A,B,('energy', {'degree' : 2}) ))
        cases.append((A,B,('energy', {'krylov' : 'cgnr', 'degree' : 2}) ))
        cases.append((A,B,('energy', {'krylov' : 'gmres'}) ))

        ##
        # Simple, imaginary-valued problems
        iA = 1.0j*A
        iB = 1.0 + rand(iA.shape[0],2) + 1.0j*(1.0 + rand(iA.shape[0],2))
        
        cases.append((iA, B,('jacobi', {'filter' : True, 'weighting' : 'diagonal'}) ))
        cases.append((iA, B,('jacobi', {'filter' : True, 'weighting' : 'block'}) ))
        cases.append((iA,iB,('jacobi', {'filter' : True, 'weighting' : 'local'}) ))
        cases.append((iA,iB,('jacobi', {'filter' : True, 'weighting' : 'block'}) ))
        
        cases.append((iA.tobsr(blocksize=(5,5)),  B, ('jacobi', {'filter' : True, 'weighting' : 'block'}) ))
        cases.append((iA.tobsr(blocksize=(5,5)), iB, ('jacobi', {'filter' : True, 'weighting' : 'block'}) ))
        
        cases.append((iA,B, ('energy', {'krylov' : 'cgnr', 'degree' : 2}) ))
        cases.append((iA,iB,('energy', {'krylov' : 'cgnr'}) ))
        cases.append((iA.tobsr(blocksize=(5,5)),B, ('energy', {'krylov' : 'cgnr', 'degree' : 2, 'maxiter' : 3}) ))
        cases.append((iA.tobsr(blocksize=(5,5)),iB,('energy', {'krylov' : 'cgnr'}) ))
 
        cases.append((iA,B, ('energy', {'krylov' : 'gmres'}) ))
        cases.append((iA,iB,('energy', {'krylov' : 'gmres', 'degree' : 2}) ))
        cases.append((iA.tobsr(blocksize=(5,5)),B, ('energy', {'krylov' : 'gmres', 'degree' : 2, 'maxiter' : 3}) ))
        cases.append((iA.tobsr(blocksize=(5,5)),iB,('energy', {'krylov' : 'gmres'}) ))

        ##
        #
        # Simple, imaginary-valued problems
        iA = A + 1.0j*scipy.sparse.eye(A.shape[0], A.shape[1])

        cases.append((iA,B, ('jacobi', {'filter' : True, 'weighting' : 'local'}) ))
        cases.append((iA,B, ('jacobi', {'filter' : True, 'weighting' : 'block'}) ))
        cases.append((iA,iB,('jacobi', {'filter' : True, 'weighting' : 'diagonal'}) ))
        cases.append((iA,iB,('jacobi', {'filter' : True, 'weighting' : 'block'}) ))
        cases.append((iA.tobsr(blocksize=(4,4)), iB, ('jacobi', {'filter' : True, 'weighting' : 'block'}) ))
        
        cases.append((iA,B,  ('energy', {'krylov' : 'cgnr'}) ))
        cases.append((iA.tobsr(blocksize=(4,4)),iB,('energy', {'krylov' : 'cgnr'}) ))

        cases.append((iA,B,                         ('energy', {'krylov' : 'gmres'}) ))
        cases.append((iA.tobsr(blocksize=(4,4)),iB, ('energy', {'krylov' : 'gmres', 'degree' : 2, 'maxiter' : 3}) ))

        ##
        #
        A = gauge_laplacian(10, spacing=1.0, beta=0.21)
        B = ones((A.shape[0],1))
        cases.append((A,iB,('jacobi', {'filter' : True, 'weighting' : 'diagonal'}) ))
        cases.append((A,iB,('jacobi', {'filter' : True, 'weighting' : 'local'}) ))

        cases.append((A,B,                        ('energy', {'krylov' : 'cg'}) ))
        cases.append((A,iB,                       ('energy', {'krylov' : 'cgnr'}) ))
        cases.append((A,iB,                       ('energy', {'krylov' : 'gmres'}) ))
        
        cases.append((A.tobsr(blocksize=(2,2)),B, ('energy', {'krylov' : 'cgnr', 'degree' : 2, 'maxiter' : 3}) ))
        cases.append((A.tobsr(blocksize=(2,2)),iB,('energy', {'krylov' : 'cg'}) ))
        cases.append((A.tobsr(blocksize=(2,2)),B, ('energy', {'krylov' : 'gmres', 'degree' : 2, 'maxiter' : 3}) ))

        ##
        #
        A,B = linear_elasticity((10,10))
        cases.append((A,B,('jacobi', {'filter' : True, 'weighting' : 'diagonal'}) ))
        cases.append((A,B,('jacobi', {'filter' : True, 'weighting' : 'local'}) ))
        cases.append((A,B,('jacobi', {'filter' : True, 'weighting' : 'block'}) ))

        cases.append((A,B,('energy', {'degree' : 2}) ))
        cases.append((A,B,('energy', {'krylov' : 'cgnr'}) ))
        cases.append((A,B,('energy', {'krylov' : 'gmres', 'degree' : 2}) ))
               

        ##
        # Classic SA cases
        for A,B,smooth in cases:
            ml = smoothed_aggregation_solver(A, B=B, max_coarse=1, max_levels=2, smooth=smooth )
            P = ml.levels[0].P
            B = ml.levels[0].B
            R = ml.levels[1].B
            assert_almost_equal(P*R, B)
        
        def blocksize(A):
            # Helper Function: return the blocksize of a matrix 
            if isspmatrix_bsr(A):
                return A.blocksize[0]
            else:
                return 1

        ##
        # Root-node cases
        counter = 0
        for A,B,smooth in cases:
            counter += 1
            
            if isinstance( smooth, tuple):
                smoother = smooth[0]
            else:
                smoother = smooth
            
            if smoother == 'energy' and (B.shape[1] >= blocksize(A)):
                 ml = rootnode_solver(A, B=B, max_coarse=1, max_levels=2, smooth=smooth, 
                         improve_candidates =[('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 4}), None],
                         keep=True, symmetry='nonsymmetric')
                 T = ml.levels[0].T.tocsr()
                 Cpts = ml.levels[0].Cpts
                 Bf = ml.levels[0].B 
                 Bf_H = ml.levels[0].BH 
                 Bc = ml.levels[1].B 
                 P = ml.levels[0].P.tocsr()
                 ##
                 # P should preserve B in its range, wherever P 
                 # has enough nonzeros
                 mask = ((P.indptr[1:] - P.indptr[:-1]) >= B.shape[1])
                 assert_almost_equal( (P*Bc)[mask,:], Bf[mask,:])
                 assert_almost_equal( (P*Bc)[mask,:], Bf_H[mask,:])
                 ##
                 # P should be the identity at Cpts
                 I = eye(T.shape[1], T.shape[1], format='csr', dtype=T.dtype)
                 I2 = P[Cpts,:]
                 assert_almost_equal(I.data, I2.data)
                 assert_equal(I.indptr, I2.indptr)
                 assert_equal(I.indices, I2.indices)
                 ##
                 # T should be the identity at Cpts
                 I2 = T[Cpts,:]
                 assert_almost_equal(I.data, I2.data)
                 assert_equal(I.indptr, I2.indptr)
                 assert_equal(I.indices, I2.indices)
Code Example #14
File: solver_diagnostics.py  Project: nschloe/pynosh
def solver_diagnostics(
    A,
    fname="solver_diagnostic",
    definiteness=None,
    symmetry=None,
    strength_list=None,
    aggregate_list=None,
    smooth_list=None,
    Bimprove_list=None,
    max_levels_list=None,
    cycle_list=None,
    krylov_list=None,
    prepostsmoother_list=None,
    B_list=None,
    coarse_size_list=None,
):
    """Try many different different parameter combinations for
    smoothed_aggregation_solver(...).  The goal is to find appropriate SA
    parameter settings for the arbitrary matrix problem A x = 0 using a
    random initial guess.

    Every combination of the input parameter lists is used to construct and
    test an SA solver.  Thus, be wary of the total number of solvers possible!
    For example, for an SPD CSR matrix, the default parameter lists generate 60
    different smoothed aggregation solvers.

    Symmetry and definiteness are automatically detected, but it is safest to
    set these manually through the ``definiteness`` and ``symmetry``
    parameters.

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix in CSR or BSR format

    fname : {string}
        File name where the diagnostic results are dumped

        Default: solver_diagnostic.txt

    definiteness : {string}
        'positive' denotes positive definiteness
        'indefinite' denotes indefiniteness

        Default: detected with a few iterations of Arnoldi iteration

    symmetry : {string}
        'hermitian' or 'nonsymmetric', denoting the symmetry of the matrix

        Default: detected by testing if A induces an inner-product

    strength_list : {list}
        List of various parameter choices for the strength argument sent to
        smoothed_aggregation_solver(...)

        Default:  [('symmetric', {'theta' : 0.0}),
                   ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':2.0}),
                   ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':4.0})]

    aggregate_list : {list}
        List of various parameter choices for the aggregate argument sent to
        smoothed_aggregation_solver(...)

        Default: ['standard']

    smooth_list : {list}
        List of various parameter choices for the smooth argument sent to
        smoothed_aggregation_solver(...)

        Default depends on the symmetry and definiteness parameters:
        if definiteness == 'positive' and (symmetry=='hermitian' or symmetry=='symmetric'):
            ['jacobi', ('jacobi', {'filter' : True, 'weighting' : 'local'}),
            ('energy',{'krylov':'cg','maxiter':2, 'degree':1, 'weighting':'local'}),
            ('energy',{'krylov':'cg','maxiter':3, 'degree':2, 'weighting':'local'}),
            ('energy',{'krylov':'cg','maxiter':4, 'degree':3, 'weighting':'local'})]
        if definiteness == 'indefinite' or symmetry=='nonsymmetric':
           [('energy',{'krylov':'gmres','maxiter':2,'degree':1,'weighting':'local'}),
            ('energy',{'krylov':'gmres','maxiter':3,'degree':2,'weighting':'local'}),
            ('energy',{'krylov':'gmres','maxiter':3,'degree':3,'weighting':'local'})]

    Bimprove_list : {list}
        List of various parameter choices for the Bimprove argument sent to
        smoothed_aggregation_solver(...)

        Default: ['default', None]

    max_levels_list : {list}
        List of various parameter choices for the max_levels argument sent to
        smoothed_aggregation_solver(...)

        Default: [25]

    cycle_list : {list}
        List of various parameter choices for the cycle argument sent to
        smoothed_aggregation_solver.solve()

        Default: ['V', 'W']

    krylov_list : {list}
        List of various parameter choices for the krylov argument sent to
        smoothed_aggregation_solver.solve().  Basic form is (string, dict),
        where the string is a Krylov descriptor, e.g., 'cg' or 'gmres', and
        dict is a dictionary of parameters like tol and maxiter.  The dictionary
        dict may be empty.

        Default depends on the symmetry and definiteness parameters:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            [('gmres', {'tol':1e-8, 'maxiter':300})]
        else:
            [('cg', {'tol':1e-8, 'maxiter':300})]

    prepostsmoother_list : {list}
        List of various parameter choices for the presmoother and postsmoother
        arguments sent to smoothed_aggregation_solver(...).  Basic form is
        [ (presmoother_descriptor, postsmoother_descriptor), ...].

        Default depends on the symmetry parameter:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            [ (('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2}),
               ('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2})) ]
        else:
            [ (('block_gauss_seidel',{'sweep':'symmetric','iterations':1}),
               ('block_gauss_seidel',{'sweep':'symmetric','iterations':1})) ]

    B_list : {list}
        List of various B parameter choices for the B and BH arguments sent to
        smoothed_aggregation_solver(...).  Basic form is [ (B, BH, string), ...].
        B is a vector of left near null-space modes used to generate
        prolongation, BH is a vector of right near null-space modes used to
        generate restriction, and string is one or more Python commands that
        generate your particular B and BH choice.  B and BH must have a row
        size equal to the dimension of A.  string is only used in the
        automatically generated test script.

        Default depends on whether A is BSR:
        if A is CSR:
            B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones')]
        if A is BSR:
            bsize = A.blocksize[0]
            B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones'),
                      (kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)),
                       kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)),
                       'B = kron(ones((A.shape[0]/A.blocksize[0],1), dtype=A.dtype),
                                 eye(A.blocksize[0])); BH = B.copy()')]

    coarse_size_list : {list}
        List of various tuples containing pairs of the (max_coarse, coarse_solver)
        parameters sent to smoothed_aggregation_solver(...).

        Default: [ (300, 'pinv') ]

    Notes
    -----
    Only smoothed_aggregation_solver(...) is used.  The Ruge-Stuben solver
    framework is not used.

    60 total solvers are generated by the defaults for CSR SPD matrices.  For
    BSR SPD matrices, 120 total solvers are generated by the defaults.  A
    somewhat smaller number of total solvers is generated if the matrix is
    indefinite or nonsymmetric.  Every combination of the parameter lists is
    attempted.

    Generally, there are two types of parameter lists passed to this function.
    Type 1 includes: cycle_list, strength_list, aggregate_list, smooth_list,
                     krylov_list, Bimprove_list, max_levels_list
                     -------------------------------------------
                     Here, you pass in a list of different parameters, e.g.,
                     cycle_list=['V','W'].

    Type 2 includes: B_list, coarse_size_list, prepostsmoother_list
                     -------------------------------------------
                     This is similar to Type 1, only these represent lists of
                     pairs of parameters, e.g.,
                     coarse_size_list=[ (300, 'pinv'), (5000, 'splu')],
                     where coarse size_list is of the form
                     [ (max_coarse, coarse_solver), ...].

    For detailed info on each of these parameter lists, see above.

    Returns
    -------
    Two files are written:
    (1) fname + '.py'
        Use the function defined here to generate and run the best
        smoothed aggregation method found.  The only argument taken
        is a BSR/CSR matrix.
    (2) fname + '.txt'
        This file outputs the solver profile for each method
        tried in a sorted table listing the best solver first.
        The detailed solver descriptions then follow the table.

    See Also
    --------
    smoothed_aggregation_solver

    Examples
    --------
    >>> from pyamg import gallery
    >>> from solver_diagnostics import *
    >>> A = gallery.poisson( (50,50), format='csr')
    >>> solver_diagnostics(A, fname='isotropic_diffusion_diagnostics.txt', cycle_list=['V'])

    """

    ##
    # Preprocess A
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            print("Implicit conversion of A to CSR in"
                  "pyamg.smoothed_aggregation_solver")
        except:
            raise TypeError("Argument A must have type csr_matrix or "
                            "bsr_matrix, or be convertible to csr_matrix")
    #
    A = A.asfptype()
    #
    if A.shape[0] != A.shape[1]:
        raise ValueError("expected square matrix")

    print(("\nSearching for optimal smoothed aggregation method for "
           "(%d,%d) matrix" % A.shape))
    print("    ...")

    ##
    # Detect symmetry
    if symmetry is None:
        if ishermitian(A, fast_check=True):
            symmetry = "hermitian"
        else:
            symmetry = "nonsymmetric"
        ##
        print("    Detected a " + symmetry + " matrix")
    else:
        print("    User specified a " + symmetry + " matrix")

    ##
    # Detect definiteness
    if definiteness is None:
        [EVect, Lambda, H, V,
         breakdown_flag] = _approximate_eigenvalues(A, 1e-6, 40)
        if Lambda.min() < 0.0:
            definiteness = "indefinite"
            print("    Detected indefiniteness")
        else:
            definiteness = "positive"
            print("    Detected positive definiteness")
    else:
        print("    User specified definiteness as " + definiteness)

    ##
    # Default B are (1) a vector of all ones, and
    # (2) if A is BSR, the constant for each variable
    if B_list is None:
        B_list = [(
            ones((A.shape[0], 1), dtype=A.dtype),
            ones((A.shape[0], 1), dtype=A.dtype),
            "B = ones((A.shape[0],1), dtype=A.dtype); BH = B.copy()",
        )]

        if isspmatrix_bsr(A) and A.blocksize[0] > 1:
            bsize = A.blocksize[0]
            B_list.append((
                kron(ones((A.shape[0] // bsize, 1), dtype=A.dtype), eye(bsize)),
                kron(ones((A.shape[0] // bsize, 1), dtype=A.dtype), eye(bsize)),
                "B = kron(ones((A.shape[0]//A.blocksize[0],1), dtype=A.dtype), eye(A.blocksize[0])); BH = B.copy()",
            ))

    ##
    # Default is to try V- and W-cycles
    if cycle_list is None:
        cycle_list = ["V", "W"]

    ##
    # Default strength of connection values
    if strength_list is None:
        strength_list = [
            ("symmetric", {
                "theta": 0.0
            }),
            ("evolution", {
                "k": 2,
                "proj_type": "l2",
                "epsilon": 2.0
            }),
            ("evolution", {
                "k": 2,
                "proj_type": "l2",
                "epsilon": 4.0
            }),
        ]

    ##
    # Default aggregation strategies
    if aggregate_list is None:
        aggregate_list = ["standard"]

    ##
    # Default prolongation smoothers
    if smooth_list is None:
        if definiteness == "positive" and (symmetry == "hermitian"
                                           or symmetry == "symmetric"):
            smooth_list = [
                "jacobi",
                ("jacobi", {
                    "filter": True,
                    "weighting": "local"
                }),
                (
                    "energy",
                    {
                        "krylov": "cg",
                        "maxiter": 2,
                        "degree": 1,
                        "weighting": "local"
                    },
                ),
                (
                    "energy",
                    {
                        "krylov": "cg",
                        "maxiter": 3,
                        "degree": 2,
                        "weighting": "local"
                    },
                ),
                (
                    "energy",
                    {
                        "krylov": "cg",
                        "maxiter": 4,
                        "degree": 3,
                        "weighting": "local"
                    },
                ),
            ]
        elif definiteness == "indefinite" or symmetry == "nonsymmetric":
            smooth_list = [
                (
                    "energy",
                    {
                        "krylov": "gmres",
                        "maxiter": 2,
                        "degree": 1,
                        "weighting": "local",
                    },
                ),
                (
                    "energy",
                    {
                        "krylov": "gmres",
                        "maxiter": 3,
                        "degree": 2,
                        "weighting": "local",
                    },
                ),
                (
                    "energy",
                    {
                        "krylov": "gmres",
                        "maxiter": 4,
                        "degree": 3,
                        "weighting": "local",
                    },
                ),
            ]
        else:
            raise ValueError("invalid string for definiteness and/or symmetry")

    ##
    # Default pre- and postsmoothers
    if prepostsmoother_list is None:
        if symmetry == "nonsymmetric" or definiteness == "indefinite":
            prepostsmoother_list = [(
                ("gauss_seidel_nr", {
                    "sweep": "symmetric",
                    "iterations": 2
                }),
                ("gauss_seidel_nr", {
                    "sweep": "symmetric",
                    "iterations": 2
                }),
            )]
        else:
            prepostsmoother_list = [(
                ("block_gauss_seidel", {
                    "sweep": "symmetric",
                    "iterations": 1
                }),
                ("block_gauss_seidel", {
                    "sweep": "symmetric",
                    "iterations": 1
                }),
            )]

    ##
    # Default Krylov wrapper
    if krylov_list is None:
        if symmetry == "nonsymmetric" or definiteness == "indefinite":
            krylov_list = [("gmres", {"tol": 1e-8, "maxiter": 300})]
        else:
            krylov_list = [("cg", {"tol": 1e-8, "maxiter": 300})]

    ##
    # Default Bimprove
    if Bimprove_list is None:
        Bimprove_list = ["default", None]

    ##
    # Default basic solver parameters
    if max_levels_list is None:
        max_levels_list = [25]
    if coarse_size_list is None:
        coarse_size_list = [(300, "pinv")]

    ##
    # Setup for ensuing numerical tests
    # The results array will hold in each row, three values:
    # iterations, operator complexity, and work per digit of accuracy
    num_test = (len(cycle_list) * len(strength_list) * len(aggregate_list) *
                len(smooth_list) * len(krylov_list) * len(Bimprove_list) *
                len(max_levels_list) * len(B_list) * len(coarse_size_list) *
                len(prepostsmoother_list))
    results = zeros((num_test, 3))
    solver_descriptors = []
    solver_args = []

    ##
    # Zero RHS and random initial guess
    random.seed(0)
    b = zeros((A.shape[0], 1), dtype=A.dtype)
    if A.dtype == complex:
        x0 = rand(A.shape[0], 1) + 1.0j * rand(A.shape[0], 1)
    else:
        x0 = rand(A.shape[0], 1)

    ##
    # Begin loops over parameter choices
    print("    ...")
    counter = -1
    for cycle in cycle_list:
        for krylov in krylov_list:
            for max_levels in max_levels_list:
                for max_coarse, coarse_solver in coarse_size_list:
                    for presmoother, postsmoother in prepostsmoother_list:
                        for B_index in range(len(B_list)):
                            for strength in strength_list:
                                for aggregate in aggregate_list:
                                    for smooth in smooth_list:
                                        for Bimprove in Bimprove_list:

                                            counter += 1
                                            print("    Test %d out of %d" %
                                                  (counter + 1, num_test))

                                            ##
                                            # Grab B vectors
                                            B, BH, Bdescriptor = B_list[B_index]

                                            ##
                                            # Store this solver setup
                                            if "tol" in krylov[1]:
                                                tol = krylov[1]["tol"]
                                            else:
                                                tol = 1e-6
                                            if "maxiter" in krylov[1]:
                                                maxiter = krylov[1]["maxiter"]
                                            else:
                                                maxiter = 300
                                            ##
                                            descriptor = (
                                                "  Solve phase arguments:" +
                                                "\n"
                                                "    cycle = " + str(cycle) +
                                                "\n"
                                                "    krylov accel = " +
                                                str(krylov[0]) + "\n"
                                                "    tol = " + str(tol) + "\n"
                                                "    maxiter = " +
                                                str(maxiter) + "\n"
                                                "  Setup phase arguments:" +
                                                "\n"
                                                "    max_levels = " +
                                                str(max_levels) + "\n"
                                                "    max_coarse = " +
                                                str(max_coarse) + "\n"
                                                "    coarse_solver = " +
                                                str(coarse_solver) + "\n"
                                                "    presmoother = " +
                                                str(presmoother) + "\n"
                                                "    postsmoother = " +
                                                str(postsmoother) + "\n"
                                                "    " + Bdescriptor + "\n"
                                                "    strength = " +
                                                str(strength) + "\n"
                                                "    aggregate = " +
                                                str(aggregate) + "\n"
                                                "    smooth = " + str(smooth) +
                                                "\n"
                                                "    Bimprove = " +
                                                str(Bimprove))
                                            solver_descriptors.append(descriptor)
                                            solver_args.append({
                                                "cycle": cycle,
                                                "accel": str(krylov[0]),
                                                "tol": tol,
                                                "maxiter": maxiter,
                                                "max_levels": max_levels,
                                                "max_coarse": max_coarse,
                                                "coarse_solver": coarse_solver,
                                                "B_index": B_index,
                                                "presmoother": presmoother,
                                                "postsmoother": postsmoother,
                                                "strength": strength,
                                                "aggregate": aggregate,
                                                "smooth": smooth,
                                                "Bimprove": Bimprove,
                                            })

                                            ##
                                            # Construct solver
                                            try:
                                                sa = smoothed_aggregation_solver(
                                                    A,
                                                    B=B,
                                                    BH=BH,
                                                    strength=strength,
                                                    smooth=smooth,
                                                    Bimprove=Bimprove,
                                                    aggregate=aggregate,
                                                    presmoother=presmoother,
                                                    max_levels=max_levels,
                                                    postsmoother=postsmoother,
                                                    max_coarse=max_coarse,
                                                    coarse_solver=coarse_solver,
                                                )

                                                ##
                                                # Solve system
                                                residuals = []
                                                x = sa.solve(
                                                    b,
                                                    x0=x0,
                                                    accel=krylov[0],
                                                    cycle=cycle,
                                                    tol=tol,
                                                    maxiter=maxiter,
                                                    residuals=residuals,
                                                )

                                                ##
                                                # Store results: iters, operator complexity, and
                                                # work per digit-of-accuracy
                                                results[counter, 0] = len(residuals)
                                                results[counter, 1] = sa.operator_complexity()
                                                resid_rate = ((residuals[-1] / residuals[0]) **
                                                              (1.0 / (len(residuals) - 1.)))
                                                results[counter, 2] = (sa.cycle_complexity() /
                                                                       abs(log10(resid_rate)))

                                            except:
                                                descriptor_indented = (
                                                    "      " + descriptor.replace("\n", "\n      "))
                                                print("    --> Failed this test")
                                                print("    --> Solver descriptor is...")
                                                print(descriptor_indented)
                                                results[counter, :] = inf
    ##
    # Sort results and solver_descriptors according to work-per-doa
    indys = argsort(results[:, 2])
    results = results[indys, :]
    solver_descriptors = list(array(solver_descriptors)[indys])
    solver_args = list(array(solver_args)[indys])

    ##
    # Create table from results and print to file
    table = [["solver #", "iters", "op complexity", "work per DOA"]]
    for i in range(results.shape[0]):
        if (results[i, :] == inf).all():
            # in this case the test failed...
            table.append(["%d" % (i + 1), "err", "err", "err"])
        else:
            table.append([
                "%d" % (i + 1),
                "%d" % results[i, 0],
                "%1.1f" % results[i, 1],
                "%1.1f" % results[i, 2],
            ])
    #
    fptr = open(fname + ".txt", "w")
    fptr.write(
        "****************************************************************\n" +
        "*                Begin Solver Diagnostic Results               *\n" +
        "*                                                              *\n" +
        "*        ''solver #'' refers to below solver descriptors       *\n" +
        "*                                                              *\n" +
        "*        ''iters'' refers to iterations taken                  *\n" +
        "*                                                              *\n" +
        "*        ''op complexity'' refers to operator complexity       *\n" +
        "*                                                              *\n" +
        "*        ''work per DOA'' refers to work per digit of          *\n" +
        "*          accuracy to solve the algebraic system, i.e. it     *\n" +
        "*          measures the overall efficiency of the solver       *\n" +
        "****************************************************************\n\n")
    fptr.write(print_table(table))

    ##
    # Now print each solver descriptor to file
    fptr.write(
        "\n****************************************************************\n"
        +
        "*                 Begin Solver Descriptors                     *\n" +
        "****************************************************************\n\n")
    for i in range(len(solver_descriptors)):
        fptr.write("Solver Descriptor %d\n" % (i + 1))
        fptr.write(solver_descriptors[i])
        fptr.write(" \n \n")

    fptr.close()

    ##
    # Now write a function definition file that generates the 'best' solver
    fptr = open(fname + ".py", "w")

    # Helper function for file writing
    def to_string(a):
        if isinstance(a, tuple):
            return str(a)
        elif isinstance(a, str):
            return "'%s'" % a
        else:
            return str(a)

    #
    fptr.write(
        "#######################################################################\n"
    )
    fptr.write(
        "# Function definition automatically generated by solver_diagnostics.py\n"
    )
    fptr.write("#\n")
    fptr.write(
        "# Use the function defined here to generate and run the best\n")
    fptr.write(
        "# smoothed aggregation method found by solver_diagnostics(...).\n")
    fptr.write("# The only argument taken is a CSR/BSR matrix.\n")
    fptr.write("#\n")
    fptr.write("# To run:  >>> # User must load/generate CSR/BSR matrix A\n")
    fptr.write("#          >>> from " + fname + " import " + fname + "\n")
    fptr.write("#          >>> " + fname + "(A)" + "\n")
    fptr.write(
        "#######################################################################\n\n"
    )
    fptr.write("from pyamg import smoothed_aggregation_solver\n")
    fptr.write("from pyamg.util.linalg import norm\n")
    fptr.write("from numpy import ones, array, arange, zeros, abs, random\n")
    fptr.write("from numpy import ravel, log10, kron, eye\n")
    fptr.write("from numpy.random import rand\n")
    fptr.write("from scipy.io import loadmat\n")
    fptr.write("from scipy.sparse import isspmatrix_bsr, isspmatrix_csr\n")
    fptr.write("import pylab\n\n")
    fptr.write("def " + fname + "(A):\n")
    fptr.write("    ##\n    # Generate B\n")
    fptr.write("    " + B_list[solver_args[0]["B_index"]][2] + "\n\n")
    fptr.write("    ##\n    # Random initial guess, zero right-hand side\n")
    fptr.write("    random.seed(0)\n")
    fptr.write("    b = zeros((A.shape[0],1))\n")
    fptr.write("    x0 = rand(A.shape[0],1)\n\n")
    fptr.write("    ##\n    # Create solver\n")
    fptr.write(
        "    ml = smoothed_aggregation_solver(A, B=B, BH=BH,\n" +
        "        strength=%s,\n" % to_string(solver_args[0]["strength"]) +
        "        smooth=%s,\n" % to_string(solver_args[0]["smooth"]) +
        "        Bimprove=%s,\n" % to_string(solver_args[0]["Bimprove"]) +
        "        aggregate=%s,\n" % to_string(solver_args[0]["aggregate"]) +
        "        presmoother=%s,\n" %
        to_string(solver_args[0]["presmoother"]) +
        "        postsmoother=%s,\n" %
        to_string(solver_args[0]["postsmoother"]) +
        "        max_levels=%s,\n" % to_string(solver_args[0]["max_levels"]) +
        "        max_coarse=%s,\n" % to_string(solver_args[0]["max_coarse"]) +
        "        coarse_solver=%s)\n\n" %
        to_string(solver_args[0]["coarse_solver"]))
    fptr.write("    ##\n    # Solve system\n")
    fptr.write("    res = []\n")
    fptr.write(
        "    x = ml.solve(b, x0=x0, tol=%s, residuals=res, accel=%s, maxiter=%s, cycle=%s)\n"
        % (
            to_string(solver_args[0]["tol"]),
            to_string(solver_args[0]["accel"]),
            to_string(solver_args[0]["maxiter"]),
            to_string(solver_args[0]["cycle"]),
        ))
    fptr.write("    res_rate = (res[-1]/res[0])**(1.0/(len(res)-1.))\n")
    fptr.write("    normr0 = norm(ravel(b) - ravel(A*x0))\n")
    fptr.write("    print()\n")
    fptr.write("    print(ml)\n")
    fptr.write("    print('System size:                ' + str(A.shape))\n")
    fptr.write("    print('Avg. Resid Reduction:       %1.2f' % res_rate)\n")
    fptr.write("    print('Iterations:                 %d' % len(res))\n")
    fptr.write("    print('Operator Complexity:        %1.2f' % ml.operator_complexity())\n")
    fptr.write("    print('Work per DOA:               %1.2f' % (ml.cycle_complexity()/abs(log10(res_rate))))\n")
    fptr.write("    print('Relative residual norm:     %1.2e' % (norm(ravel(b) - ravel(A*x))/normr0))\n\n")
    fptr.write("    ##\n    # Plot residual history\n")
    fptr.write("    pylab.semilogy(array(res)/normr0)\n")
    fptr.write("    pylab.title('Residual Histories')\n")
    fptr.write("    pylab.xlabel('Iteration')\n")
    fptr.write("    pylab.ylabel('Relative Residual Norm')\n")
    fptr.write("    pylab.show()\n\n")
    # Close file pointer
    fptr.close()

    print("    ...")
    print("    --> Diagnostic Results located in " + fname + ".txt")
    print("    ...")
    print(
        "    --> See automatically generated function definition\n" +
        "        ./" + fname + ".py.\n\n" +
        "        Use the function defined here to generate and run the best\n"
        +
        "        smoothed aggregation method found.  The only argument taken\n"
        + "        is a CSR/BSR matrix.\n\n" +
        "        To run: >>> # User must load/generate CSR/BSR matrix A\n" +
        "                >>> from " + fname + " import " + fname + "\n" +
        "                >>> " + fname + "(A)")
Code example #15
def solver_diagnostics(
    A,
    fname='solver_diagnostic',
    definiteness=None,
    symmetry=None,
    strength_list=None,
    aggregate_list=None,
    smooth_list=None,
    Bimprove_list=None,
    max_levels_list=None,
    cycle_list=None,
    krylov_list=None,
    prepostsmoother_list=None,
    B_list=None,
    coarse_size_list=None
    ):
    '''Try many different parameter combinations for
    smoothed_aggregation_solver(...).  The goal is to find appropriate SA
    parameter settings for an arbitrary matrix problem A x = 0 using a
    random initial guess.

    Every combination of the input parameter lists is used to construct and
    test an SA solver.  Thus, be wary of the total number of solvers possible!
    For example, for an SPD CSR matrix, the default parameter lists generate 60
    different smoothed aggregation solvers.

    Symmetry and definiteness are automatically detected, but it is safest to
    set these properties manually through the ``definiteness`` and ``symmetry``
    parameters.

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix in CSR or BSR format

    fname : {string}
        File name where the diagnostic results are dumped

        Default: solver_diagnostic.txt

    definiteness : {string}
        'positive' denotes positive definiteness
        'indefinite' denotes indefiniteness

        Default: detected with a few iterations of the Arnoldi method

    symmetry : {string}
        'hermitian' or 'nonsymmetric', denoting the symmetry of the matrix

        Default: detected by testing if A induces an inner-product

    strength_list : {list}
        List of various parameter choices for the strength argument sent to
        smoothed_aggregation_solver(...)

        Default:  [('symmetric', {'theta' : 0.0}),
                   ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':2.0}),
                   ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':4.0})]

    aggregate_list : {list}
        List of various parameter choices for the aggregate argument sent to
        smoothed_aggregation_solver(...)

        Default: ['standard']

    smooth_list : {list}
        List of various parameter choices for the smooth argument sent to
        smoothed_aggregation_solver(...)

        Default depends on the symmetry and definiteness parameters:
        if definiteness == 'positive' and (symmetry=='hermitian' or symmetry=='symmetric'):
            ['jacobi', ('jacobi', {'filter' : True, 'weighting' : 'local'}),
            ('energy',{'krylov':'cg','maxiter':2, 'degree':1, 'weighting':'local'}),
            ('energy',{'krylov':'cg','maxiter':3, 'degree':2, 'weighting':'local'}),
            ('energy',{'krylov':'cg','maxiter':4, 'degree':3, 'weighting':'local'})]
        if definiteness == 'indefinite' or symmetry=='nonsymmetric':
           [('energy',{'krylov':'gmres','maxiter':2,'degree':1,'weighting':'local'}),
            ('energy',{'krylov':'gmres','maxiter':3,'degree':2,'weighting':'local'}),
            ('energy',{'krylov':'gmres','maxiter':3,'degree':3,'weighting':'local'})]

    Bimprove_list : {list}
        List of various parameter choices for the Bimprove argument sent to
        smoothed_aggregation_solver(...)

        Default: ['default', None]

    max_levels_list : {list}
        List of various parameter choices for the max_levels argument sent to
        smoothed_aggregation_solver(...)

        Default: [25]

    cycle_list : {list}
        List of various parameter choices for the cycle argument sent to
        smoothed_aggregation_solver.solve()

        Default: ['V', 'W']

    krylov_list : {list}
        List of various parameter choices for the krylov argument sent to
        smoothed_aggregation_solver.solve().  Basic form is (string, dict),
        where the string is a Krylov descriptor, e.g., 'cg' or 'gmres', and
        dict is a dictionary of parameters like tol and maxiter.  The dictionary
        dict may be empty.

        Default depends on the symmetry and definiteness parameters:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            [('gmres', {'tol':1e-8, 'maxiter':300})]
        else:
            [('cg', {'tol':1e-8, 'maxiter':300})]

    prepostsmoother_list : {list}
        List of various parameter choices for the presmoother and postsmoother
        arguments sent to smoothed_aggregation_solver(...).  Basic form is
        [ (presmoother_descriptor, postsmoother_descriptor), ...].

        Default depends on the symmetry parameter:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            [ (('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2}),
               ('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2})) ]
        else:
            [ (('block_gauss_seidel',{'sweep':'symmetric','iterations':1}),
               ('block_gauss_seidel',{'sweep':'symmetric','iterations':1})) ]

    B_list : {list}
        List of various B parameter choices for the B and BH arguments sent to
        smoothed_aggregation_solver(...).  Basic form is [ (B, BH, string), ...].
        B is a vector of left near null-space modes used to generate
        prolongation, BH is a vector of right near null-space modes used to
        generate restriction, and string holds the Python command(s) that
        generate your particular B and BH choice.  B and BH must have a row
        size equal to the dimension of A.  string is only used in the
        automatically generated test script.

        Default depends on whether A is BSR:
        if A is CSR:
            B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones')]
        if A is BSR:
            bsize = A.blocksize[0]
            B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones'),
                      (kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)),
                       kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)),
                       'B = kron(ones((A.shape[0]/A.blocksize[0],1), dtype=A.dtype),
                                 eye(A.blocksize[0])); BH = B.copy()')]

    coarse_size_list : {list}
        List of various tuples containing pairs of the (max_coarse, coarse_solver)
        parameters sent to smoothed_aggregation_solver(...).

        Default: [ (300, 'pinv') ]

    Notes
    -----
    Only smoothed_aggregation_solver(...) is used.  The Ruge-Stuben solver
    framework is not used.

    60 total solvers are generated by the defaults for CSR SPD matrices.  For
    BSR SPD matrices, 120 total solvers are generated by the defaults.  A
    somewhat smaller number of total solvers is generated if the matrix is
    indefinite or nonsymmetric.  Every combination of the parameter lists is
    attempted.

    Generally, there are two types of parameter lists passed to this function.
    Type 1 includes: cycle_list, strength_list, aggregate_list, smooth_list,
                     krylov_list, Bimprove_list, max_levels_list
                     -------------------------------------------
                     Here, you pass in a list of different parameters, e.g.,
                     cycle_list=['V','W'].

    Type 2 includes: B_list, coarse_size_list, prepostsmoother_list
                     -------------------------------------------
                     This is similar to Type 1, only these represent lists of
                     pairs of parameters, e.g.,
                     coarse_size_list=[ (300, 'pinv'), (5000, 'splu')],
                     where coarse_size_list is of the form
                     [ (max_coarse, coarse_solver), ...].

    For detailed info on each of these parameter lists, see above.  A call
    that overrides one list of each type is sketched in the Examples section.

    Returns
    -------
    Two files are written:
    (1) fname + '.py'
        Use the function defined here to generate and run the best
        smoothed aggregation method found.  The only argument taken
        is a BSR/CSR matrix.
    (2) fname + '.txt'
        This file outputs the solver profile for each method
        tried in a sorted table listing the best solver first.
        The detailed solver descriptions then follow the table.

    See Also
    --------
    smoothed_aggregation_solver

    Examples
    --------
    >>> from pyamg import gallery
    >>> from solver_diagnostics import *
    >>> A = gallery.poisson( (50,50), format='csr')
    >>> solver_diagnostics(A, fname='isotropic_diffusion_diagnostics', cycle_list=['V'])
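
    A hypothetical call that also overrides one Type 1 list (cycle_list) and
    one Type 2 list (coarse_size_list); the particular values are
    illustrative only:

    >>> solver_diagnostics(A, fname='poisson_diagnostics',
    ...                    cycle_list=['V'],
    ...                    krylov_list=[('cg', {'tol': 1e-8, 'maxiter': 200})],
    ...                    coarse_size_list=[(300, 'pinv'), (1000, 'splu')])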

    '''

    ##
    # Preprocess A
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            print('Implicit conversion of A to CSR in '
                  'pyamg.smoothed_aggregation_solver')
        except Exception:
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')
    #
    A = A.asfptype()
    #
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    print('\nSearching for optimal smoothed aggregation method for '
          '(%d,%d) matrix' % A.shape
          )
    print('    ...')

    ##
    # Detect symmetry
    if symmetry is None:
        if ishermitian(A, fast_check=True):
            symmetry = 'hermitian'
        else:
            symmetry = 'nonsymmetric'
        ##
        print('    Detected a ' + symmetry + ' matrix')
    else:
        print('    User specified a ' + symmetry + ' matrix')

    ##
    # Detect definiteness
    if definiteness is None:
        [EVect, Lambda, H, V, breakdown_flag] = \
            _approximate_eigenvalues(A, 1e-6, 40)
        if Lambda.min() < 0.0:
            definiteness = 'indefinite'
            print('    Detected indefiniteness')
        else:
            definiteness = 'positive'
            print('    Detected positive definiteness')
    else:
        print('    User specified definiteness as ' + definiteness)

    ##
    # Default B are (1) a vector of all ones, and
    # (2) if A is BSR, the constant for each variable
    if B_list is None:
        B_list = [(ones((A.shape[0], 1), dtype=A.dtype),
                   ones((A.shape[0], 1), dtype=A.dtype),
                   'B = ones((A.shape[0],1), dtype=A.dtype); BH = B.copy()')]

        if isspmatrix_bsr(A) and A.blocksize[0] > 1:
            bsize = A.blocksize[0]
            B_list.append(
                (kron(ones((A.shape[0]//bsize, 1), dtype=A.dtype), eye(bsize)),
                 kron(ones((A.shape[0]//bsize, 1), dtype=A.dtype), eye(bsize)),
                 'B = kron(ones((A.shape[0]//A.blocksize[0],1), dtype=A.dtype), eye(A.blocksize[0])); BH = B.copy()'
                 )
                )

    ##
    # Default is to try V- and W-cycles
    if cycle_list is None:
        cycle_list = ['V', 'W']

    ##
    # Default strength of connection values
    if strength_list is None:
        strength_list = [('symmetric', {'theta': 0.0}),
                         ('evolution', {'k': 2,
                                        'proj_type': 'l2',
                                        'epsilon': 2.0
                                        }),
                         ('evolution', {'k': 2,
                                        'proj_type': 'l2',
                                        'epsilon': 4.0
                                        })]

    ##
    # Default aggregation strategies
    if aggregate_list is None:
        aggregate_list = ['standard']

    ##
    # Default prolongation smoothers
    if smooth_list is None:
        if definiteness == 'positive' \
            and (symmetry == 'hermitian' or symmetry == 'symmetric'):
            smooth_list = [
                'jacobi',
                ('jacobi', {'filter': True, 'weighting': 'local'}),
                ('energy', {'krylov': 'cg',
                            'maxiter': 2,
                            'degree': 1,
                            'weighting': 'local'
                            }),
                ('energy', {'krylov': 'cg',
                            'maxiter': 3,
                            'degree': 2,
                            'weighting': 'local'
                            }),
                ('energy', {'krylov': 'cg',
                            'maxiter': 4,
                            'degree': 3,
                            'weighting': 'local'
                            })
                ]
        elif definiteness == 'indefinite' or symmetry=='nonsymmetric':
            smooth_list =[('energy',{'krylov':'gmres','maxiter':2,'degree':1,'weighting':'local'}),
                          ('energy',{'krylov':'gmres','maxiter':3,'degree':2,'weighting':'local'}),
                          ('energy',{'krylov':'gmres','maxiter':4,'degree':3,'weighting':'local'})]
        else:
            raise ValueError('invalid string for definiteness and/or symmetry')

    ##
    # Default pre- and postsmoothers
    if prepostsmoother_list is None:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            prepostsmoother_list = [ (('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2}),
                                      ('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2})) ]
        else:
            prepostsmoother_list= [ (('block_gauss_seidel',{'sweep':'symmetric','iterations':1}),
                                     ('block_gauss_seidel',{'sweep':'symmetric','iterations':1})) ]

    ##
    # Default Krylov wrapper
    if krylov_list is None:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            krylov_list = [('gmres', {'tol':1e-8, 'maxiter':300})]
        else:
            krylov_list = [('cg', {'tol':1e-8, 'maxiter':300})]

    ##
    # Default Bimprove
    if Bimprove_list is None:
        Bimprove_list = ['default', None]

    ##
    # Default basic solver parameters
    if max_levels_list is None:
        max_levels_list = [25]
    if coarse_size_list is None:
        coarse_size_list = [(300, 'pinv')]

    ##
    # Setup for ensuing numerical tests
    # The results array will hold in each row, three values:
    # iterations, operator complexity, and work per digit of accuracy
    num_test = len(cycle_list)*len(strength_list)*len(aggregate_list)*len(smooth_list)* \
               len(krylov_list)*len(Bimprove_list)*len(max_levels_list)*len(B_list)* \
               len(coarse_size_list)*len(prepostsmoother_list)
    results = zeros( (num_test,3) )
    solver_descriptors = []
    solver_args = []

    ##
    # Zero RHS and random initial guess
    random.seed(0)
    b = zeros((A.shape[0], 1), dtype=A.dtype)
    if A.dtype == complex:
        x0 = rand(A.shape[0], 1) + 1.0j*rand(A.shape[0], 1)
    else:
        x0 = rand(A.shape[0], 1)

    ##
    # Begin loops over parameter choices
    print('    ...')
    counter = -1
    for cycle in cycle_list:
        for krylov in krylov_list:
            for max_levels in max_levels_list:
                for max_coarse,coarse_solver in coarse_size_list:
                    for presmoother,postsmoother in prepostsmoother_list:
                        for B_index in range(len(B_list)):
                            for strength in strength_list:
                                for aggregate in aggregate_list:
                                    for smooth in smooth_list:
                                        for Bimprove in Bimprove_list:

                                            counter += 1
                                            print('    Test %d out of %d' % (counter + 1, num_test))

                                            ##
                                            # Grab B vectors
                                            B,BH,Bdescriptor = B_list[B_index]

                                            ##
                                            # Store this solver setup
                                            tol = krylov[1].get('tol', 1e-6)
                                            maxiter = krylov[1].get('maxiter', 300)
                                            ##
                                            descriptor = '  Solve phase arguments:' + '\n' \
                                                '    cycle = ' + str(cycle) + '\n' \
                                                '    krylov accel = ' + str(krylov[0]) + '\n' \
                                                '    tol = ' + str(tol) + '\n' \
                                                '    maxiter = ' + str(maxiter)+'\n'\
                                                '  Setup phase arguments:' + '\n' \
                                                '    max_levels = ' + str(max_levels) + '\n' \
                                                '    max_coarse = ' + str(max_coarse) + '\n' \
                                                '    coarse_solver = ' + str(coarse_solver)+'\n'\
                                                '    presmoother = ' + str(presmoother) + '\n' \
                                                '    postsmoother = ' + str(postsmoother) + '\n'\
                                                '    ' + Bdescriptor + '\n' \
                                                '    strength = ' + str(strength) + '\n' \
                                                '    aggregate = ' + str(aggregate) + '\n' \
                                                '    smooth = ' + str(smooth) + '\n' \
                                                '    Bimprove = ' + str(Bimprove)
                                            solver_descriptors.append(descriptor)
                                            solver_args.append( {'cycle' : cycle,
                                                'accel' : str(krylov[0]),
                                                'tol' : tol, 'maxiter' : maxiter,
                                                'max_levels' : max_levels, 'max_coarse' : max_coarse,
                                                'coarse_solver' : coarse_solver, 'B_index' : B_index,
                                                'presmoother' : presmoother,
                                                'postsmoother' : postsmoother,
                                                'strength' : strength, 'aggregate' : aggregate,
                                                'smooth' : smooth, 'Bimprove' : Bimprove} )

                                            ##
                                            # Construct solver
                                            try:
                                                sa = smoothed_aggregation_solver(A, B=B, BH=BH,
                                                  strength=strength, smooth=smooth,
                                                  Bimprove=Bimprove, aggregate=aggregate,
                                                  presmoother=presmoother, max_levels=max_levels,
                                                  postsmoother=postsmoother, max_coarse=max_coarse,
                                                  coarse_solver=coarse_solver)

                                                ##
                                                # Solve system
                                                residuals = []
                                                x = sa.solve(b, x0=x0, accel=krylov[0],
                                                  cycle=cycle, tol=tol, maxiter=maxiter,
                                                  residuals=residuals)

                                                ##
                                                # Store results: iters, operator complexity, and
                                                # work per digit-of-accuracy
                                                results[counter,0] = len(residuals)
                                                results[counter,1] = sa.operator_complexity()
                                                resid_rate = (residuals[-1]/residuals[0])**\
                                                             (1.0/(len(residuals)-1.))
                                                results[counter,2] = sa.cycle_complexity()/ \
                                                                     abs(log10(resid_rate))

                                            except Exception:
                                                descriptor_indented = '      ' + \
                                                    descriptor.replace('\n', '\n      ')
                                                print('    --> Failed this test')
                                                print('    --> Solver descriptor is...')
                                                print(descriptor_indented)
                                                results[counter, :] = inf
    ##
    # Sort results and solver_descriptors according to work-per-doa
    indys = argsort(results[:,2])
    results = results[indys,:]
    solver_descriptors = list(array(solver_descriptors)[indys])
    solver_args = list(array(solver_args)[indys])

    ##
    # Create table from results and print to file
    table = [ ['solver #', 'iters', 'op complexity', 'work per DOA'] ]
    for i in range(results.shape[0]):
        if (results[i, :] == inf).all():
            # in this case the test failed...
            table.append(['%d'%(i+1), 'err', 'err', 'err'])
        else:
            table.append(['%d'%(i+1),'%d'%results[i,0],'%1.1f'%results[i,1],'%1.1f'%results[i,2]])
    #
    fptr = open(fname+'.txt', 'w')
    fptr.write('****************************************************************\n' + \
               '*                Begin Solver Diagnostic Results               *\n' + \
               '*                                                              *\n' + \
               '*        \'\'solver #\'\' refers to below solver descriptors       *\n' + \
               '*                                                              *\n' + \
               '*        \'\'iters\'\' refers to iterations taken                  *\n' + \
               '*                                                              *\n' + \
               '*        \'\'op complexity\'\' refers to operator complexity       *\n' + \
               '*                                                              *\n' + \
               '*        \'\'work per DOA\'\' refers to work per digit of          *\n' + \
               '*          accuracy to solve the algebraic system, i.e. it     *\n' + \
               '*          measures the overall efficiency of the solver       *\n' + \
               '****************************************************************\n\n')
    fptr.write(print_table(table))

    ##
    # Now print each solver descriptor to file
    fptr.write('\n****************************************************************\n' + \
                 '*                 Begin Solver Descriptors                     *\n' + \
                 '****************************************************************\n\n')
    for i in range(len(solver_descriptors)):
        fptr.write('Solver Descriptor %d\n'%(i+1))
        fptr.write(solver_descriptors[i])
        fptr.write(' \n \n')

    fptr.close()

    ##
    # Now write a function definition file that generates the 'best' solver
    fptr = open(fname + '.py', 'w')
    # Helper function for file writing
    def to_string(a):
        if isinstance(a, tuple):
            return str(a)
        elif isinstance(a, str):
            return '\'%s\'' % a
        else:
            return str(a)
    #
    fptr.write('#######################################################################\n')
    fptr.write('# Function definition automatically generated by solver_diagnostics.py\n')
    fptr.write('#\n')
    fptr.write('# Use the function defined here to generate and run the best\n')
    fptr.write('# smoothed aggregation method found by solver_diagnostics(...).\n')
    fptr.write('# The only argument taken is a CSR/BSR matrix.\n')
    fptr.write('#\n')
    fptr.write('# To run:  >>> # User must load/generate CSR/BSR matrix A\n')
    fptr.write('#          >>> from ' + fname + ' import ' + fname + '\n' )
    fptr.write('#          >>> ' + fname + '(A)' + '\n')
    fptr.write('#######################################################################\n\n')
    fptr.write('from pyamg import smoothed_aggregation_solver\n')
    fptr.write('from pyamg.util.linalg import norm\n')
    fptr.write('from numpy import ones, array, arange, zeros, abs, random\n')
    fptr.write('from numpy import ravel, log10, kron, eye\n')
    fptr.write('from numpy.random import rand\n')
    fptr.write('from scipy.io import loadmat\n')
    fptr.write('from scipy.sparse import isspmatrix_bsr, isspmatrix_csr\n')
    fptr.write('import pylab\n\n')
    fptr.write('def ' + fname + '(A):\n')
    fptr.write('    ##\n    # Generate B\n')
    fptr.write('    ' + B_list[solver_args[0]['B_index']][2] + '\n\n')
    fptr.write('    ##\n    # Random initial guess, zero right-hand side\n')
    fptr.write('    random.seed(0)\n')
    fptr.write('    b = zeros((A.shape[0],1))\n')
    fptr.write('    x0 = rand(A.shape[0],1)\n\n')
    fptr.write('    ##\n    # Create solver\n')
    fptr.write('    ml = smoothed_aggregation_solver(A, B=B, BH=BH,\n' + \
               '        strength=%s,\n'%to_string(solver_args[0]['strength']) + \
               '        smooth=%s,\n'%to_string(solver_args[0]['smooth']) + \
               '        Bimprove=%s,\n'%to_string(solver_args[0]['Bimprove']) + \
               '        aggregate=%s,\n'%to_string(solver_args[0]['aggregate']) + \
               '        presmoother=%s,\n'%to_string(solver_args[0]['presmoother']) + \
               '        postsmoother=%s,\n'%to_string(solver_args[0]['postsmoother']) + \
               '        max_levels=%s,\n'%to_string(solver_args[0]['max_levels']) + \
               '        max_coarse=%s,\n'%to_string(solver_args[0]['max_coarse']) + \
               '        coarse_solver=%s)\n\n'%to_string(solver_args[0]['coarse_solver']) )
    fptr.write('    ##\n    # Solve system\n')
    fptr.write('    res = []\n')
    fptr.write('    x = ml.solve(b, x0=x0, tol=%s, residuals=res, accel=%s, maxiter=%s, cycle=%s)\n'%\
              (to_string(solver_args[0]['tol']),
               to_string(solver_args[0]['accel']),
               to_string(solver_args[0]['maxiter']),
               to_string(solver_args[0]['cycle'])) )
    fptr.write('    res_rate = (res[-1]/res[0])**(1.0/(len(res)-1.))\n')
    fptr.write('    normr0 = norm(ravel(b) - ravel(A*x0))\n')
    fptr.write('    print()\n')
    fptr.write('    print(ml)\n')
    fptr.write('    print(\'System size:                \' + str(A.shape))\n')
    fptr.write('    print(\'Avg. Resid Reduction:       %1.2f\' % res_rate)\n')
    fptr.write('    print(\'Iterations:                 %d\' % len(res))\n')
    fptr.write('    print(\'Operator Complexity:        %1.2f\' % ml.operator_complexity())\n')
    fptr.write('    print(\'Work per DOA:               %1.2f\' % (ml.cycle_complexity()/abs(log10(res_rate))))\n')
    fptr.write('    print(\'Relative residual norm:     %1.2e\' % (norm(ravel(b) - ravel(A*x))/normr0))\n\n')
    fptr.write('    ##\n    # Plot residual history\n')
    fptr.write('    pylab.semilogy(array(res)/normr0)\n')
    fptr.write('    pylab.title(\'Residual Histories\')\n')
    fptr.write('    pylab.xlabel(\'Iteration\')\n')
    fptr.write('    pylab.ylabel(\'Relative Residual Norm\')\n')
    fptr.write('    pylab.show()\n\n')
    # Close file pointer
    fptr.close()

    print('    ...')
    print('    --> Diagnostic Results located in ' + fname + '.txt')
    print('    ...')
    print('    --> See automatically generated function definition\n' +
          '        ./' + fname + '.py.\n\n' +
          '        Use the function defined here to generate and run the best\n' +
          '        smoothed aggregation method found.  The only argument taken\n' +
          '        is a CSR/BSR matrix.\n\n' +
          '        To run: >>> # User must load/generate CSR/BSR matrix A\n' +
          '                >>> from ' + fname + ' import ' + fname + '\n' +
          '                >>> ' + fname + '(A)')
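
For context, here is a minimal end-to-end sketch (an editorial illustration,
not part of the example above) of how the diagnostic and its generated helper
might be used together.  It assumes pyamg is installed, that the example above
is saved as solver_diagnostics.py, and that the default fname
'solver_diagnostic' is kept, so the generated module is solver_diagnostic.py:

from pyamg.gallery import poisson
from solver_diagnostics import solver_diagnostics

A = poisson((50, 50), format='csr')
solver_diagnostics(A, cycle_list=['V'])   # writes solver_diagnostic.txt and solver_diagnostic.py

# Afterwards, the generated module can be imported and run on the same matrix.
from solver_diagnostic import solver_diagnostic
solver_diagnostic(A)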
Code example #16
File: test_smooth.py  Project: Alexey-Voronin/pyamg-1
    def test_range(self):
        """Check that P*R=B."""
        warnings.filterwarnings('ignore', category=UserWarning,
                                message='Having less target vectors')
        np.random.seed(18410243)  # make tests repeatable

        cases = []

        # Simple, real-valued diffusion problems
        name = 'airfoil'
        X = load_example('airfoil')
        A = X['A'].tocsr()
        B = X['B']
        cases.append((A, B, ('jacobi',
                             {'filter_entries': True, 'weighting': 'local'}), name))
        cases.append((A, B, ('jacobi',
                             {'filter_entries': True, 'weighting': 'block'}), name))

        cases.append((A, B, ('energy', {'maxiter': 3}), name))
        cases.append((A, B, ('energy', {'krylov': 'cgnr', 'weighting': 'diagonal'}), name))
        cases.append((A, B, ('energy', {'krylov': 'gmres', 'degree': 2}), name))

        name = 'poisson'
        A = poisson((10, 10), format='csr')
        B = np.ones((A.shape[0], 1))
        cases.append((A, B, ('jacobi',
                             {'filter_entries': True, 'weighting': 'diagonal'}), name))
        cases.append((A, B, ('jacobi',
                             {'filter_entries': True, 'weighting': 'local'}), name))

        cases.append((A, B, 'energy', name))
        cases.append((A, B, ('energy', {'degree': 2}), name))
        cases.append((A, B, ('energy', {'krylov': 'cgnr', 'degree': 2,
                                        'weighting': 'diagonal'}), name))
        cases.append((A, B, ('energy', {'krylov': 'gmres'}), name))

        # Simple, imaginary-valued problems
        name = 'random imaginary'
        iA = 1.0j * A
        iB = 1.0 + np.random.rand(iA.shape[0], 2)\
                 + 1.0j * (1.0 + np.random.rand(iA.shape[0], 2))

        cases.append((iA, B, ('jacobi',
                              {'filter_entries': True, 'weighting': 'diagonal'}), name))
        cases.append((iA, B, ('jacobi',
                              {'filter_entries': True, 'weighting': 'block'}), name))
        cases.append((iA, iB, ('jacobi',
                               {'filter_entries': True, 'weighting': 'local'}), name))
        cases.append((iA, iB, ('jacobi',
                               {'filter_entries': True, 'weighting': 'block'}), name))

        cases.append((iA.tobsr(blocksize=(5, 5)), B,
                      ('jacobi', {'filter_entries': True, 'weighting': 'block'}), name))
        cases.append((iA.tobsr(blocksize=(5, 5)), iB,
                      ('jacobi', {'filter_entries': True, 'weighting': 'block'}), name))

        cases.append((iA, B, ('energy', {'krylov': 'cgnr', 'degree': 2,
                                         'weighting': 'diagonal'}), name))
        cases.append((iA, iB, ('energy', {'krylov': 'cgnr',
                                          'weighting': 'diagonal'}), name))
        cases.append((iA.tobsr(blocksize=(5, 5)), B,
                      ('energy',
                       {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3,
                        'weighting': 'diagonal', 'postfilter': {'theta': 0.05}}), name))
        cases.append((iA.tobsr(blocksize=(5, 5)), B,
                      ('energy',
                       {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3,
                        'weighting': 'diagonal',
                        'prefilter': {'theta': 0.05}}), name))
        cases.append((iA.tobsr(blocksize=(5, 5)), B,
                      ('energy',
                       {'krylov': 'cgnr', 'degree': 2,
                        'weighting': 'diagonal', 'maxiter': 3}), name))
        cases.append((iA.tobsr(blocksize=(5, 5)), iB,
                      ('energy', {'krylov': 'cgnr', 'weighting': 'diagonal'}), name))

        cases.append((iA, B, ('energy', {'krylov': 'gmres'}), name))
        cases.append((iA, iB, ('energy', {'krylov': 'gmres', 'degree': 2}), name))
        cases.append((iA.tobsr(blocksize=(5, 5)), B,
                      ('energy',
                       {'krylov': 'gmres', 'degree': 2, 'maxiter': 3}), name))
        cases.append((iA.tobsr(blocksize=(5, 5)), iB,
                      ('energy', {'krylov': 'gmres'}), name))

        # Simple, imaginary-valued problems
        name = 'random imaginary + I'
        iA = A + 1.0j * sparse.eye(A.shape[0], A.shape[1])

        cases.append((iA, B, ('jacobi',
                              {'filter_entries': True, 'weighting': 'local'}), name))
        cases.append((iA, B, ('jacobi',
                              {'filter_entries': True, 'weighting': 'block'}), name))
        cases.append((iA, iB, ('jacobi',
                               {'filter_entries': True, 'weighting': 'diagonal'}), name))
        cases.append((iA, iB, ('jacobi',
                               {'filter_entries': True, 'weighting': 'block'}), name))
        cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                      ('jacobi', {'filter_entries': True, 'weighting': 'block'}), name))

        cases.append((iA, B, ('energy', {'krylov': 'cgnr', 'weighting': 'diagonal'}), name))
        cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                      ('energy', {'krylov': 'cgnr', 'weighting': 'diagonal'}), name))

        cases.append((iA, B, ('energy', {'krylov': 'gmres'}), name))
        cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                      ('energy',
                       {'krylov': 'gmres', 'degree': 2, 'maxiter': 3}), name))

        cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                      ('energy',
                       {'krylov': 'gmres', 'degree': 2, 'maxiter': 3,
                        'postfilter': {'theta': 0.05}}), name))
        cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                      ('energy',
                       {'krylov': 'gmres', 'degree': 2, 'maxiter': 3,
                        'prefilter': {'theta': 0.05}}), name))

        name = 'gauge laplacian'
        A = gauge_laplacian(10, spacing=1.0, beta=0.21)
        B = np.ones((A.shape[0], 1))
        cases.append((A, iB, ('jacobi',
                              {'filter_entries': True, 'weighting': 'diagonal'}), name))
        cases.append((A, iB, ('jacobi',
                              {'filter_entries': True, 'weighting': 'local'}), name))

        cases.append((A, B, ('energy', {'krylov': 'cg'}), name))
        cases.append((A, iB, ('energy', {'krylov': 'cgnr', 'weighting': 'diagonal'}), name))
        cases.append((A, iB, ('energy', {'krylov': 'gmres'}), name))

        name = 'gauge laplacian bsr'
        cases.append((A.tobsr(blocksize=(2, 2)), B,
                      ('energy', {'krylov': 'cgnr', 'degree': 2, 'weighting': 'diagonal',
                                  'maxiter': 3, 'postfilter': {'theta': 0.05}}),
                     name))
        cases.append((A.tobsr(blocksize=(2, 2)), B,
                      ('energy', {'krylov': 'cgnr', 'degree': 2, 'weighting': 'diagonal',
                       'maxiter': 3, 'prefilter': {'theta': 0.05}}),
                     name))
        cases.append((A.tobsr(blocksize=(2, 2)), B,
                      ('energy', {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3,
                                  'weighting': 'diagonal'}),
                      name))
        cases.append((A.tobsr(blocksize=(2, 2)), iB,
                     ('energy', {'krylov': 'cg'}), name))
        cases.append((A.tobsr(blocksize=(2, 2)), B,
                      ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3}),
                      name))
        cases.append((A.tobsr(blocksize=(2, 2)), B,
                     ('energy', {'krylov': 'gmres', 'degree': 2,
                      'maxiter': 3, 'postfilter': {'theta': 0.05}}),
                     name))
        cases.append((A.tobsr(blocksize=(2, 2)), B,
                     ('energy', {'krylov': 'gmres', 'degree': 2,
                      'maxiter': 3, 'prefilter': {'theta': 0.05}}),
                     name))

        #
        name = 'linear elasticity'
        A, B = linear_elasticity((10, 10))
        cases.append((A, B, ('jacobi',
                             {'filter_entries': True, 'weighting': 'diagonal'}), name))
        cases.append((A, B, ('jacobi',
                             {'filter_entries': True, 'weighting': 'local'}), name))
        cases.append((A, B, ('jacobi',
                             {'filter_entries': True, 'weighting': 'block'}), name))
        cases.append((A, B, ('energy', {'degree': 2}), name))
        cases.append((A, B, ('energy', {'degree': 3, 'postfilter': {'theta': 0.05}}), name))
        cases.append((A, B, ('energy', {'degree': 3, 'prefilter': {'theta': 0.05}}), name))
        cases.append((A, B, ('energy', {'krylov': 'cgnr', 'weighting': 'diagonal'}), name))
        cases.append((A, B, ('energy', {'krylov': 'gmres', 'degree': 2}), name))

        # Classic SA cases
        for A, B, smooth, _name in cases:
            ml = smoothed_aggregation_solver(A, B=B, max_coarse=1,
                                             max_levels=2, smooth=smooth)
            P = ml.levels[0].P
            B = ml.levels[0].B
            R = ml.levels[1].B
            assert_almost_equal(P * R, B)

        def _get_blocksize(A):
            # Helper Function: return the blocksize of a matrix
            if sparse.isspmatrix_bsr(A):
                return A.blocksize[0]

            return 1

        # Root-node cases
        counter = 0
        for A, B, smooth, _name in cases:
            counter += 1

            if isinstance(smooth, tuple):
                smoother = smooth[0]
            else:
                smoother = smooth

            if smoother == 'energy' and (B.shape[1] >= _get_blocksize(A)):
                ic = [('gauss_seidel_nr',
                       {'sweep': 'symmetric', 'iterations': 4}), None]
                ml = rootnode_solver(A, B=B, max_coarse=1, max_levels=2,
                                     smooth=smooth,
                                     improve_candidates=ic,
                                     keep=True, symmetry='nonsymmetric')
                T = ml.levels[0].T.tocsr()
                Cpts = ml.levels[0].Cpts
                Bf = ml.levels[0].B
                Bf_H = ml.levels[0].BH
                Bc = ml.levels[1].B
                P = ml.levels[0].P.tocsr()

                T.eliminate_zeros()
                P.eliminate_zeros()

                # P should preserve B in its range, wherever P
                # has enough nonzeros
                mask = ((P.indptr[1:] - P.indptr[:-1]) >= B.shape[1])
                assert_almost_equal((P*Bc)[mask, :], Bf[mask, :])
                assert_almost_equal((P*Bc)[mask, :], Bf_H[mask, :])

                # P should be the identity at Cpts
                I1 = sparse.eye(T.shape[1], T.shape[1], format='csr', dtype=T.dtype)
                I2 = P[Cpts, :]
                assert_almost_equal(I1.data, I2.data)
                assert_equal(I1.indptr, I2.indptr)
                assert_equal(I1.indices, I2.indices)

                # T should be the identity at Cpts
                I2 = T[Cpts, :]
                assert_almost_equal(I1.data, I2.data)
                assert_equal(I1.indptr, I2.indptr)
                assert_equal(I1.indices, I2.indices)
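
The assertions above check that the SA interpolation operator reproduces the
near null-space candidates: coarse-level B vectors, interpolated by P, match
the fine-level B vectors.  A minimal standalone sketch of the same check
(an editorial illustration, assuming pyamg and numpy are available):

import numpy as np
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson

A = poisson((10, 10), format='csr')
B = np.ones((A.shape[0], 1))
ml = smoothed_aggregation_solver(A, B=B, max_coarse=1, max_levels=2, smooth='energy')
P = ml.levels[0].P        # fine-to-coarse prolongation
Bf = ml.levels[0].B       # fine-level candidate vectors
Bc = ml.levels[1].B       # coarse-level candidate vectors
assert np.allclose(P @ Bc, Bf)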
Code example #17
File: edgeAMG.py  Project: VfifthV/pyamg-examples
    return PEdge

    
    
if __name__ == '__main__':
    
    Acurl = csr_matrix(mmread("HCurlStiffness.dat"))
    Anode = csr_matrix(mmread("H1Stiffness.dat"))
    D = csr_matrix(mmread("D.dat"))
    
    
    ml = edgeAMG(Anode,Acurl,D)
    MLOp = ml.aspreconditioner()
    x = numpy.random.rand(Acurl.shape[1],1)
    b = Acurl*x
    x0 = numpy.ones((Acurl.shape[1],1))
    
    r_edgeAMG = []
    r_None = []
    r_SA = []
    
    ml_SA = smoothed_aggregation_solver(Acurl)
    ML_SAOP = ml_SA.aspreconditioner()
    x_prec,info = cg(Acurl,b,x0,M=MLOp,tol=1e-8,residuals=r_edgeAMG)
    x_prec,info = cg(Acurl,b,x0,M=None,tol=1e-8,residuals=r_None)
    x_prec,info = cg(Acurl,b,x0,M=ML_SAOP,tol=1e-8,residuals=r_SA)
    
    import pylab
    pylab.semilogy(range(0,len(r_edgeAMG)), r_edgeAMG, range(0,len(r_None)), r_None, range(0,len(r_SA)), r_SA)
    pylab.show()
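
A hypothetical follow-up to the comparison above (not part of the original
script): report the average residual reduction factor per iteration for each
CG run, using the same measure that solver_diagnostics reports.  It assumes
the residual lists r_edgeAMG, r_None, and r_SA populated by the script:

for label, r in [('edgeAMG', r_edgeAMG), ('none', r_None), ('SA', r_SA)]:
    factor = (r[-1] / r[0]) ** (1.0 / (len(r) - 1.0))
    print('%-8s iterations=%3d  avg. reduction=%1.2f' % (label, len(r), factor))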
Code example #18
File: test_solver_pyamg.py  Project: ratnania/pigasus
PDE.assembly()
PDE.solve()

# getting scipy matrix
A_scipy = PDE.system.get()

b = np.ones(PDE.size)

# ----------------------------------------------
import scipy
from pyamg.aggregation import smoothed_aggregation_solver

B = None                                # no near null-space guesses for SA

# Construct solver using AMG based on Smoothed Aggregation (SA) and display info
mls = smoothed_aggregation_solver(A_scipy, B=B)


# Solve Ax=b with no acceleration ('standalone' solver)
print("Using pyamg-standalone")
standalone_residuals = []
t_start = time.time()
x = mls.solve(b, tol=tol_pyamg, accel=None, maxiter=maxiter_pyamg, residuals=standalone_residuals)
t_end = time.time()
mls_elapsed = t_end - t_start
mls_err = standalone_residuals[-1]
mls_niter = len(standalone_residuals)
print("done.")
standalone_residuals = np.array(standalone_residuals)/standalone_residuals[0]
factor1 = standalone_residuals[-1]**(1.0/len(standalone_residuals))
standalone_final_err = np.linalg.norm(b-A_scipy.dot(x))