Example #1
def setup_block_jacobi(lvl, iterations=1, omega=1.0, Dinv=None, blocksize=None,
                       withrho=True):
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sp.sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sp.sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        blocksize = Dinv.shape[1]

    if blocksize == 1:
        # Block Jacobi is equivalent to normal Jacobi
        return setup_jacobi(lvl, iterations=iterations, omega=omega,
                            withrho=withrho)
    else:
        # Use Block Jacobi
        if Dinv is None:
            Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)
        if withrho:
            omega = omega/rho_block_D_inv_A(lvl.A, Dinv)

        def smoother(A, x, b):
            relaxation.block_jacobi(A, x, b, iterations=iterations,
                                    omega=omega, Dinv=Dinv,
                                    blocksize=blocksize)
        return smoother
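A minimal sketch of how this setup routine might be driven, assuming pyamg's helpers (get_block_diag, rho_block_D_inv_A, relaxation.block_jacobi) are importable; the SimpleNamespace level object is a stand-in for illustration, not pyamg's actual level class.

import numpy as np
import scipy.sparse as sparse
from types import SimpleNamespace

# A small 1D Poisson matrix stored as BSR with 2x2 blocks, so the block path runs
n = 8
A = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
A = A.tobsr(blocksize=(2, 2))

lvl = SimpleNamespace(A=A)           # hypothetical stand-in for a level object
smoother = setup_block_jacobi(lvl)   # blocksize is inferred from A.blocksize[0]

x = np.zeros(n)
b = np.ones(n)
smoother(A, x, b)                    # one damped block-Jacobi sweep, updates x in place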
Example #2
def setup_block_jacobi(lvl, iterations=DEFAULT_NITER, omega=1.0, Dinv=None,
                       blocksize=None, withrho=True):
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sp.sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sp.sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        blocksize = Dinv.shape[1]

    if blocksize == 1:
        # Block Jacobi is equivalent to normal Jacobi
        return setup_jacobi(lvl, iterations=iterations, omega=omega,
                            withrho=withrho)
    else:
        # Use Block Jacobi
        if Dinv is None:
            Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)
        if withrho:
            omega = omega/rho_block_D_inv_A(lvl.A, Dinv)

        def smoother(A, x, b):
            relaxation.block_jacobi(A, x, b, iterations=iterations,
                                    omega=omega, Dinv=Dinv,
                                    blocksize=blocksize)
        return smoother
Example #3
def setup_block_gauss_seidel(lvl, iterations=DEFAULT_NITER,
                             sweep=DEFAULT_SWEEP,
                             Dinv=None, blocksize=None):
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sp.sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sp.sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        blocksize = Dinv.shape[1]

    if blocksize == 1:
        # Block GS is equivalent to normal GS
        return setup_gauss_seidel(lvl, iterations=iterations, sweep=sweep)
    else:
        # Use Block GS
        if Dinv is None:
            Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)

        def smoother(A, x, b):
            relaxation.block_gauss_seidel(A, x, b, iterations=iterations,
                                          Dinv=Dinv, blocksize=blocksize,
                                          sweep=sweep)

        return smoother
Example #4
def setup_block_gauss_seidel(lvl,
                             iterations=DEFAULT_NITER,
                             sweep=DEFAULT_SWEEP,
                             Dinv=None,
                             blocksize=None):
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sp.sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sp.sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        blocksize = Dinv.shape[1]

    if blocksize == 1:
        # Block GS is equivalent to normal GS
        return setup_gauss_seidel(lvl, iterations=iterations, sweep=sweep)
    else:
        # Use Block GS
        if Dinv is None:
            Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)

        def smoother(A, x, b):
            relaxation.block_gauss_seidel(A,
                                          x,
                                          b,
                                          iterations=iterations,
                                          Dinv=Dinv,
                                          blocksize=blocksize,
                                          sweep=sweep)

        return smoother
Example #5
def setup_FC_block_jacobi(lvl, F_iterations=DEFAULT_NITER, C_iterations=DEFAULT_NITER,
                          iterations=DEFAULT_NITER, omega=1.0, Dinv=None, blocksize=None,
                          withrho=False):
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sp.sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sp.sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        if sp.sparse.isspmatrix_bsr(Dinv):
            blocksize = Dinv.blocksize[1]
        else:
            blocksize = 1

    # Check for compatible dimensions
    if (lvl.A.shape[0] % blocksize) != 0:
        raise ValueError("Blocksize does not divide size of matrix.")
    elif (len(lvl.splitting)*blocksize != lvl.A.shape[0]):
        raise ValueError("Blocksize not compatible with CF-splitting and matrix size.")

    if blocksize == 1:
        # Block Jacobi is equivalent to normal Jacobi
        return setup_FC_jacobi(lvl, iterations=iterations, omega=omega,
                               withrho=withrho)
    else:
        # Get C-points and F-points from splitting
        try:
            Fpts = np.array(np.where(lvl.splitting == 0)[0], dtype='int32')
            Cpts = np.array(np.where(lvl.splitting == 1)[0], dtype='int32')
            lvl.nf = len(Fpts)
            lvl.nc = len(Cpts)
        except AttributeError:
            raise ValueError("CF-splitting array needs to be stored in hierarchy.")

        # Use Block Jacobi
        if Dinv is None:
            Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)
        if withrho:
            omega = omega/rho_block_D_inv_A(lvl.A, Dinv)

        def smoother(A, x, b):
            relaxation.FC_block_jacobi(A, x, b, Cpts=Cpts, Fpts=Fpts, iterations=iterations,
                                       F_iterations=F_iterations,  C_iterations=C_iterations,
                                       omega=omega, Dinv=Dinv, blocksize=blocksize)
        return smoother
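The FC variant additionally needs a CF-splitting on the level. A sketch under the same assumptions as above (pyamg helpers in scope; the level object is a hypothetical stand-in), with splitting marking C-points as 1 and F-points as 0, one entry per block row.

import numpy as np
import scipy.sparse as sparse
from types import SimpleNamespace

n = 8
A = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
A = A.tobsr(blocksize=(2, 2))

# Four 2x2 block rows; len(splitting) * blocksize must equal A.shape[0]
splitting = np.array([0, 1, 0, 1], dtype='int32')
lvl = SimpleNamespace(A=A, splitting=splitting)

smoother = setup_FC_block_jacobi(lvl)
smoother(A, np.zeros(n), np.ones(n))   # F-sweeps and C-sweeps applied in place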
Example #6
def jacobi_prolongation_smoother(S, T, C, B, omega=4.0/3.0, degree=1, filter=False, weighting='diagonal'):
    """Jacobi prolongation smoother
   
    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    C : {csr_matrix, bsr_matrix}
        Strength-of-connection matrix
    B : {array}
        Near nullspace modes for the coarse grid such that T*B 
        exactly reproduces the fine grid near nullspace modes
    omega : {scalar}
        Damping parameter
    degree : {int}
        Number of Jacobi smoothing passes applied to T
    filter : {boolean}
        If true, filter S before smoothing T.  This option can greatly reduce
        operator complexity.
    weighting : {string}
        'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
          Avoids any potential under-damping due to inaccurate spectral radius
          estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A  
        'diagonal': Classic Jacobi D = diagonal(A)

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
        where K = diag(S)^-1 * S and rho(K) is an approximation to the 
        spectral radius of K.

    Notes
    -----
    If weighting is not 'local', then results using Jacobi prolongation
    smoother are not precisely reproducible due to a random initial guess used
    for the spectral radius approximation.  For precise reproducibility, 
    set numpy.random.seed(..) to the same value before each test. 
    
    Examples
    --------
    >>> from pyamg.aggregation import jacobi_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy
    >>> data = numpy.ones((6,))
    >>> row = numpy.arange(0,6)
    >>> col = numpy.kron([0,1],numpy.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = jacobi_prolongation_smoother(A,T,A,numpy.ones((2,1)))
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    # preprocess weighting
    if weighting == 'block':
        if isspmatrix_csr(S):
            weighting = 'diagonal'
        elif isspmatrix_bsr(S):
            if S.blocksize[0] == 1:
                weighting = 'diagonal'
    
    if filter:
        # Implement filtered prolongation smoothing for the general case by
        # utilizing satisfy constraints

        if isspmatrix_bsr(S):
            numPDEs = S.blocksize[0]
        else:
            numPDEs = 1

        # Create a filtered S with entries dropped that aren't in C
        C = UnAmal(C, numPDEs, numPDEs)
        S = S.multiply(C)
        S.eliminate_zeros()

    if weighting == 'diagonal':
        # Use diagonal of S
        D_inv = get_diagonal(S, inv=True)
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
    elif weighting == 'block':
        # Use block diagonal of S
        D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
        D_inv = bsr_matrix((D_inv, numpy.arange(D_inv.shape[0]),
                            numpy.arange(D_inv.shape[0]+1)), shape=S.shape)
        D_inv_S = D_inv*S
        D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
    elif weighting == 'local':
        # Use the Gershgorin estimate as each row's weight, instead of a global
        # spectral radius estimate
        D = numpy.abs(S)*numpy.ones((S.shape[0],1), dtype=S.dtype)
        D_inv = numpy.zeros_like(D)
        D_inv[D != 0] = 1.0 / numpy.abs(D[D != 0])

        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = omega*D_inv_S
    else:
        raise ValueError('Incorrect weighting option')

    
    if filter:
        # Carry out Jacobi, but after calculating the prolongator update, U,
        # apply satisfy constraints so that U*B = 0
        P = T
        for i in range(degree):
            if isspmatrix_bsr(P):
                U = (D_inv_S*P).tobsr(blocksize=P.blocksize)
            else:
                U = D_inv_S*P

            # Enforce U*B = 0:
            # (1) Construct array of inv(Bi'Bi), where Bi is B restricted to row
            # i's sparsity pattern in Sparsity Pattern. This array is used
            # multiple times in Satisfy_Constraints(...).
            BtBinv = compute_BtBinv(B, U)
            # (2) Apply satisfy constraints
            Satisfy_Constraints(U, B, BtBinv)

            # Update P
            P = P - U

    else:
        # Carry out Jacobi as normal
        P = T
        for i in range(degree):
            P = P - (D_inv_S*P)

    return P
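For the default 'diagonal' weighting, the smoother above reduces to P = (I - omega/rho(K) K) T with K = D^-1 S, applied degree times. A self-contained sketch of that update in plain scipy, using scipy's eigsh as a stand-in for pyamg's approximate_spectral_radius (an assumption for illustration; K is symmetric here, so eigsh applies).

import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import eigsh

# Damped-Jacobi prolongation smoothing: P = (I - omega/rho(K) * K) * T
S = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(6, 6), format='csr')
T = sparse.csr_matrix(np.kron(np.eye(2), np.ones((3, 1))))   # tentative prolongator

D_inv = 1.0 / S.diagonal()
K = sparse.diags(D_inv) @ S
rho = abs(eigsh(K, k=1, return_eigenvectors=False)[0])       # spectral radius estimate

omega, degree = 4.0 / 3.0, 1
P = T
for _ in range(degree):
    P = P - (omega / rho) * (K @ P)                          # one Jacobi pass on T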
Example #7
def cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter, tol,
                              weighting='local', Cpt_params=None):
    '''
    Helper function for energy_prolongation_smoother(...)   

    Use CG to smooth T by solving A T = 0, subject to nullspace
    and sparsity constraints.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        SPD sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is initial guess for the equation A T = 0.
        Assumed that T B_c = B_f
    B : {array}
        Near-nullspace modes for coarse grid, i.e., B_c.  
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : {array}
        3 dimensional array such that,
        BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted 
        to the neighborhood (in the matrix graph) of dof of i.
    Sparsity_Pattern : {csr_matrix, bsr_matrix}
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the 
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal preconditioning
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If the Cpt_params[0] = False, then
        the standard SA prolongation smoothing is carried out.  If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and I_C: the C-point analogue to I_F. 

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator using conjugate gradients to solve A T = 0, 
        subject to the constraints, T B_c = B_f, and T has no nonzero 
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine, 
    pyamg.aggregation.smooth.energy_prolongation_smoother 

    '''

    # Preallocate
    AP = bsr_matrix((numpy.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype),
                     Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
                    shape=Sparsity_Pattern.shape)

    # CG will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = bsr_matrix((Dinv, numpy.arange(Dinv.shape[0]),
                           numpy.arange(Dinv.shape[0]+1)), shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate
        D = numpy.abs(A)*numpy.ones((A.shape[0],1), dtype=A.dtype)
        Dinv = numpy.zeros_like(D)
        Dinv[D != 0] = 1.0 / numpy.abs(D[D != 0])
    else:
        raise ValueError('weighting value is invalid')

    # Calculate initial residual
    #   Equivalent to R = -A*T;    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever 
    #   R is 0 and Sparsity_Pattern is not
    R = bsr_matrix((numpy.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype),
                    Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
                   shape=Sparsity_Pattern.shape)
    pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices, numpy.ravel(A.data),
                                           T.indptr, T.indices, numpy.ravel(T.data),
                                           R.indptr, R.indices, numpy.ravel(R.data),
                                           T.shape[0]//T.blocksize[0],
                                           T.shape[1]//T.blocksize[1],
                                           A.blocksize[0], A.blocksize[1], T.blocksize[1])
    R.data *= -1.0

    # Enforce R*B = 0
    Satisfy_Constraints(R, B, BtBinv)

    if R.nnz == 0:
        print("Error in sa_energy_min(..).  Initial R has no nonzeros on a level.  "
              "Returning tentative prolongator")
        return T

    # Calculate Frobenius norm of the residual
    resid = R.nnz  # numpy.sqrt((R.data.conjugate()*R.data).sum())
    
    i = 0
    while i < maxiter and resid > tol:
        # Apply diagonal preconditioner
        if weighting == 'local' or weighting == 'diagonal':
            Z = scale_rows(R, Dinv)
        else:
            Z = Dinv*R

        # Frobenius inner-product of (R,Z) = sum( numpy.conjugate(rk).*zk)
        newsum = (R.conjugate().multiply(Z)).sum()
        if newsum < tol:
            # met tolerance, so halt
            break

        # P is the search direction, not the prolongator, which is T.
        if i == 0:
            P = Z
        else:
            beta = newsum/oldsum
            P = Z + beta*P
        oldsum = newsum
        
        # Calculate new direction and enforce constraints
        #   Equivalent to:  AP = A*P;    AP = AP.multiply(Sparsity_Pattern)
        #   with the added constraint that explicit zeros are in AP wherever 
        #   AP = 0 and Sparsity_Pattern does not  !!!!
        AP.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices, numpy.ravel(A.data),
                                               P.indptr, P.indices, numpy.ravel(P.data),
                                               AP.indptr, AP.indices, numpy.ravel(AP.data),
                                               T.shape[0]//T.blocksize[0],
                                               T.shape[1]//T.blocksize[1],
                                               A.blocksize[0], A.blocksize[1], P.blocksize[1])

        # Enforce AP*B = 0
        Satisfy_Constraints(AP, B, BtBinv)
        
        # Frobenius inner-product of (P, AP)
        alpha = newsum/(P.conjugate().multiply(AP)).sum()

        # Update the prolongator, T
        T = T + alpha*P

        # Ensure identity at C-pts 
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']

        # Update residual
        R = R - alpha*AP

        i += 1

        # Calculate Frobenius norm of the residual
        resid = R.nnz  # numpy.sqrt((R.data.conjugate()*R.data).sum())
   
    return T
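A detail worth isolating from the loop above: because the unknown is a matrix, every CG inner product is a Frobenius inner product, <X, Y>_F = sum(conj(x_ij) * y_ij), computed directly on sparse matrices via .multiply(). A minimal check of that identity:

import numpy as np
import scipy.sparse as sparse

X = sparse.random(50, 10, density=0.1, format='csr')
Y = sparse.random(50, 10, density=0.1, format='csr')

# Frobenius inner product, exactly as newsum and alpha are formed above
frob = (X.conjugate().multiply(Y)).sum()

# Agrees with the dense definition sum(conj(X) * Y)
assert np.isclose(frob, np.vdot(X.toarray(), Y.toarray()))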
Example #8
def jacobi_prolongation_smoother(S,
                                 T,
                                 C,
                                 B,
                                 omega=4.0 / 3.0,
                                 degree=1,
                                 filter=False,
                                 weighting='diagonal',
                                 cost=[0.0]):
    """Jacobi prolongation smoother

    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    C : {csr_matrix, bsr_matrix}
        Strength-of-connection matrix
    B : {array}
        Near nullspace modes for the coarse grid such that T*B
        exactly reproduces the fine grid near nullspace modes
    omega : {scalar}
        Damping parameter
    degree : {int}
        Number of Jacobi smoothing passes applied to T
    filter : {boolean}
        If true, filter S before smoothing T.  This option can greatly reduce
        operator complexity.
    weighting : {string}
        'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
          Avoids any potential under-damping due to inaccurate spectral radius
          estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A
        'diagonal': Classic Jacobi D = diagonal(A)

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
        where K = diag(S)^-1 * S and rho(K) is an approximation to the
        spectral radius of K.

    Notes
    -----
    If weighting is not 'local', then results using Jacobi prolongation
    smoother are not precisely reproducible due to a random initial guess used
    for the spectral radius approximation.  For precise reproducibility,
    set numpy.random.seed(..) to the same value before each test.

    Examples
    --------
    >>> from pyamg.aggregation import jacobi_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = jacobi_prolongation_smoother(A,T,A,np.ones((2,1)))
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    # preprocess weighting
    if weighting == 'block':
        if sparse.isspmatrix_csr(S):
            weighting = 'diagonal'
        elif sparse.isspmatrix_bsr(S):
            if S.blocksize[0] == 1:
                weighting = 'diagonal'

    if filter:
        # Implement filtered prolongation smoothing for the general case by
        # utilizing satisfy constraints

        if sparse.isspmatrix_bsr(S):
            numPDEs = S.blocksize[0]
        else:
            numPDEs = 1

        # Create a filtered S with entries dropped that aren't in C
        C = UnAmal(C, numPDEs, numPDEs)
        S = S.multiply(C)
        S.eliminate_zeros()
        cost[0] += 1.0

    if weighting == 'diagonal':
        # Use diagonal of S
        D_inv = get_diagonal(S, inv=True)
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
        # 15 WU to find spectral radius, 2 to scale D_inv_S twice
        cost[0] += 17
    elif weighting == 'block':
        # Use block diagonal of S
        D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
        D_inv = sparse.bsr_matrix(
            (D_inv, np.arange(D_inv.shape[0]), np.arange(D_inv.shape[0] + 1)),
            shape=S.shape)
        D_inv_S = D_inv * S
        # 15 WU to find spectral radius, 2 to scale D_inv_S twice
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
        cost[0] += 17
    elif weighting == 'local':
        # Use the Gershgorin estimate as each row's weight, instead of a global
        # spectral radius estimate
        D = np.abs(S) * np.ones((S.shape[0], 1), dtype=S.dtype)
        D_inv = np.zeros_like(D)
        D_inv[D != 0] = 1.0 / np.abs(D[D != 0])

        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = omega * D_inv_S
        cost[0] += 3
    else:
        raise ValueError('Incorrect weighting option')

    if filter:
        # Carry out Jacobi, but after calculating the prolongator update, U,
        # apply satisfy constraints so that U*B = 0
        P = T
        for i in range(degree):
            if sparse.isspmatrix_bsr(P):
                U = (D_inv_S * P).tobsr(blocksize=P.blocksize)
            else:
                U = D_inv_S * P

            cost[0] += P.nnz / float(S.nnz)

            # (1) Enforce U*B = 0. Construct array of inv(Bi'Bi), where Bi is B
            # restricted to row i's sparsity pattern in Sparsity Pattern. This
            # array is used multiple times in Satisfy_Constraints(...).
            temp_cost = [0.0]
            BtBinv = compute_BtBinv(B, U, cost=temp_cost)
            cost[0] += temp_cost[0] / float(S.nnz)

            # (2) Apply satisfy constraints
            temp_cost = [0.0]
            Satisfy_Constraints(U, B, BtBinv, cost=temp_cost)
            cost[0] += temp_cost[0] / float(S.nnz)

            # Update P
            P = P - U
            cost[0] += max(P.nnz, U.nnz) / float(S.nnz)
    else:
        # Carry out Jacobi as normal
        P = T
        for i in range(degree):
            P = P - (D_inv_S * P)
            cost[0] += P.nnz / float(S.nnz)

    return P
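One Python caveat with the cost=[0.0] signatures used in this and the following examples: a mutable default argument is created once at definition time and shared by every call that relies on it, so the work-unit tally silently accumulates across calls unless the caller passes a fresh list. A tiny demonstration:

def f(cost=[0.0]):
    cost[0] += 1.0
    return cost

print(f())        # [1.0]
print(f())        # [2.0] -- the same list object as the first call
print(f([0.0]))   # [1.0] -- a caller-supplied fresh list avoids the aliasing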
Example #9
def gmres_prolongation_smoothing(A,
                                 T,
                                 B,
                                 BtBinv,
                                 Sparsity_Pattern,
                                 maxiter,
                                 tol,
                                 weighting='local',
                                 Cpt_params=None,
                                 cost=[0.0]):
    '''
    Helper function for energy_prolongation_smoother(...).

    Use GMRES to smooth T by solving A T = 0, subject to nullspace
    and sparsity constraints.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        SPD sparse NxN matrix
        Should be at least nonsymmetric or indefinite
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is initial guess for the equation A T = 0.
        Assumed that T B_c = B_f
    B : {array}
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : {array}
        3 dimensional array such that,
        BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
        to the neighborhood (in the matrix graph) of dof of i.
    Sparsity_Pattern : {csr_matrix, bsr_matrix}
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If the Cpt_params[0] = False, then
        the standard SA prolongation smoothing is carried out.  If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and I_C: the C-point analogue to I_F.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator using GMRES to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    '''

    # For non-SPD system, apply GMRES with Diagonal Preconditioning

    # Preallocate space for new search directions
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    AV = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))

    # Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
    xtype = sparse.sputils.upcast(A.dtype, T.dtype, B.dtype)
    Q = []  # Givens Rotations
    V = []  # Krylov Space
    # vs = []     # vs store the pointers to each column of V for speed

    # Upper Hessenberg matrix, converted to upper tri with Givens Rots
    H = np.zeros((maxiter + 1, maxiter + 1), dtype=xtype)

    # GMRES will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        cost[0] += 1.0
    else:
        raise ValueError('weighting value is invalid')

    # Calculate initial residual
    #   Equivalent to R = -A*T;    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever
    #   R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))
    pyamg.amg_core.incomplete_mat_mult_bsr(
        A.indptr, A.indices, np.ravel(A.data), T.indptr, T.indices,
        np.ravel(T.data), R.indptr, R.indices, np.ravel(R.data),
        int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
        A.blocksize[0], A.blocksize[1], T.blocksize[1])
    R.data *= -1.0
    # T is block diagonal, using sparsity pattern of R with
    # incomplete=True significantly overestimates complexity.
    # More accurate to use full mat-mat with block diagonal T.
    cost[0] += mat_mat_complexity(A, T, incomplete=False) / float(A.nnz)

    # Apply diagonal preconditioner
    if weighting == 'local' or weighting == 'diagonal':
        R = scale_rows(R, Dinv)
    else:
        R = Dinv * R

    cost[0] += R.nnz / float(A.nnz)

    # Enforce R*B = 0
    temp_cost = [0.0]
    Satisfy_Constraints(R, B, BtBinv, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    if R.nnz == 0:
        print("Error in sa_energy_min(..).  Initial R has no nonzeros on a level.  "
              "Returning tentative prolongator")
        return T

    # This is the RHS vector for the problem in the Krylov Space
    normr = np.sqrt((R.data.conjugate() * R.data).sum())
    g = np.zeros((maxiter + 1, ), dtype=xtype)
    g[0] = normr

    # First Krylov vector
    # V[0] = r/normr
    if normr > 0.0:
        V.append((1.0 / normr) * R)

    i = -1
    while i < maxiter - 1 and normr > tol:
        i = i + 1

        # Calculate new search direction
        #   Equivalent to:  AV = A*V;    AV = AV.multiply(Sparsity_Pattern)
        #   with the added constraint that explicit zeros are in AP wherever
        #   AP = 0 and Sparsity_Pattern does not
        AV.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(
            A.indptr, A.indices, np.ravel(A.data), V[i].indptr, V[i].indices,
            np.ravel(V[i].data), AV.indptr, AV.indices, np.ravel(AV.data),
            int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
            A.blocksize[0], A.blocksize[1], T.blocksize[1])
        cost[0] += mat_mat_complexity(A, AV, incomplete=True) / float(A.nnz)

        if weighting == 'local' or weighting == 'diagonal':
            AV = scale_rows(AV, Dinv)
        else:
            AV = Dinv * AV

        cost[0] += AV.nnz / float(A.nnz)

        # Enforce AV*B = 0
        temp_cost = [0.0]
        Satisfy_Constraints(AV, B, BtBinv, cost=temp_cost)
        V.append(AV.copy())
        cost[0] += temp_cost[0] / float(A.nnz)

        # Modified Gram-Schmidt
        for j in range(i + 1):
            # Frobenius inner-product
            H[j, i] = (V[j].conjugate().multiply(V[i + 1])).sum()
            V[i + 1] = V[i + 1] - H[j, i] * V[j]
            cost[0] += 2.0 * max(V[i + 1].nnz, V[j].nnz) / float(A.nnz)

        # Frobenius Norm
        H[i + 1, i] = np.sqrt(
            (V[i + 1].data.conjugate() * V[i + 1].data).sum())
        cost[0] += V[i + 1].nnz / float(A.nnz)

        # Check for breakdown
        if H[i + 1, i] != 0.0:
            V[i + 1] = (1.0 / H[i + 1, i]) * V[i + 1]
            cost[0] += V[i + 1].nnz / float(A.nnz)

        # Apply previous Givens rotations to H
        if i > 0:
            apply_givens(Q, H[:, i], i)

        # Calculate and apply next complex-valued Givens Rotation
        if H[i + 1, i] != 0:
            h1 = H[i, i]
            h2 = H[i + 1, i]
            h1_mag = np.abs(h1)
            h2_mag = np.abs(h2)
            if h1_mag < h2_mag:
                mu = h1 / h2
                tau = np.conjugate(mu) / np.abs(mu)
            else:
                mu = h2 / h1
                tau = mu / np.abs(mu)

            denom = np.sqrt(h1_mag**2 + h2_mag**2)
            c = h1_mag / denom
            s = h2_mag * tau / denom
            Qblock = np.array([[c, np.conjugate(s)], [-s, c]], dtype=xtype)
            Q.append(Qblock)

            # Apply Givens Rotation to g,
            #   the RHS for the linear system in the Krylov Subspace.
            g[i:i + 2] = np.dot(Qblock, g[i:i + 2])

            # Apply effect of Givens Rotation to H
            H[i, i] = np.dot(Qblock[0, :], H[i:i + 2, i])
            H[i + 1, i] = 0.0

        normr = np.abs(g[i + 1])
    # End while loop

    # Find best update to x in Krylov Space, V.  Solve the (i+1) x (i+1) system.
    if i != -1:
        y = la.solve(H[0:i + 1, 0:i + 1], g[0:i + 1])
        for j in range(i + 1):
            T = T + y[j] * V[j]
            cost[0] += max(T.nnz, V[j].nnz) / float(A.nnz)

    # Ensure identity at C-pts
    if Cpt_params[0]:
        T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

    return T
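The 2x2 blocks appended to Q above are complex-valued Givens rotations built to annihilate the subdiagonal entry H[i+1, i]. The same construction, isolated into a self-contained sketch (formulas copied from the loop; the helper name is hypothetical):

import numpy as np

def givens_block(h1, h2):
    """2x2 unitary block Q with (Q @ [h1, h2])[1] == 0."""
    if np.abs(h1) < np.abs(h2):
        mu = h1 / h2
        tau = np.conjugate(mu) / np.abs(mu)
    else:
        mu = h2 / h1
        tau = mu / np.abs(mu)
    denom = np.sqrt(np.abs(h1)**2 + np.abs(h2)**2)
    c = np.abs(h1) / denom
    s = np.abs(h2) * tau / denom
    return np.array([[c, np.conjugate(s)], [-s, c]])

Q = givens_block(3.0 + 4.0j, 1.0 - 2.0j)
print(np.round(Q @ np.array([3.0 + 4.0j, 1.0 - 2.0j]), 12))  # second entry is 0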
Example #10
def cg_prolongation_smoothing(A,
                              T,
                              B,
                              BtBinv,
                              Sparsity_Pattern,
                              maxiter,
                              tol,
                              weighting='local',
                              Cpt_params=None,
                              cost=[0.0]):
    '''
    Helper function for energy_prolongation_smoother(...)

    Use CG to smooth T by solving A T = 0, subject to nullspace
    and sparsity constraints.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        SPD sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is initial guess for the equation A T = 0.
        Assumed that T B_c = B_f
    B : {array}
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : {array}
        3 dimensional array such that,
        BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
        to the neighborhood (in the matrix graph) of dof of i.
    Sparsity_Pattern : {csr_matrix, bsr_matrix}
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If the Cpt_params[0] = False, then
        the standard SA prolongation smoothing is carried out.  If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and I_C: the C-point analogue to I_F.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator using conjugate gradients to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    '''

    # Preallocate
    AP = sparse.bsr_matrix(
        (np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype),
         Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))

    # CG will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        cost[0] += 1
    else:
        raise ValueError('weighting value is invalid')

    # Calculate initial residual
    #   Equivalent to R = -A*T;    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever
    #   R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))
    pyamg.amg_core.incomplete_mat_mult_bsr(
        A.indptr, A.indices, np.ravel(A.data), T.indptr, T.indices,
        np.ravel(T.data), R.indptr, R.indices, np.ravel(R.data),
        int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
        A.blocksize[0], A.blocksize[1], T.blocksize[1])
    R.data *= -1.0
    # T is block diagonal, using sparsity pattern of R with
    # incomplete=True significantly overestimates complexity.
    # More accurate to use full mat-mat with block diagonal T.
    cost[0] += mat_mat_complexity(A, T, incomplete=False) / float(A.nnz)

    # Enforce R*B = 0
    temp_cost = [0.0]
    Satisfy_Constraints(R, B, BtBinv, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    if R.nnz == 0:
        print("Error in sa_energy_min(..).  Initial R has no nonzeros on a level.  "
              "Returning tentative prolongator")
        return T

    # Calculate Frobenius norm of the residual
    resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())

    i = 0
    while i < maxiter and resid > tol:
        # Apply diagonal preconditioner
        if weighting == 'local' or weighting == 'diagonal':
            Z = scale_rows(R, Dinv)
        else:
            Z = Dinv * R

        cost[0] += R.nnz / float(A.nnz)

        # Frobenius inner-product of (R,Z) = sum( np.conjugate(rk).*zk)
        newsum = (R.conjugate().multiply(Z)).sum()
        cost[0] += Z.nnz / float(A.nnz)
        if newsum < tol:
            # met tolerance, so halt
            break

        # P is the search direction, not the prolongator, which is T.
        if i == 0:
            P = Z
        else:
            beta = newsum / oldsum
            P = Z + beta * P
            cost[0] += max(Z.nnz, P.nnz) / float(A.nnz)
        oldsum = newsum

        # Calculate new direction and enforce constraints
        #   Equivalent to:  AP = A*P;    AP = AP.multiply(Sparsity_Pattern)
        #   with the added constraint that explicit zeros are in AP wherever
        #   AP = 0 and Sparsity_Pattern does not  !!!!
        AP.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(
            A.indptr, A.indices, np.ravel(A.data), P.indptr, P.indices,
            np.ravel(P.data), AP.indptr, AP.indices, np.ravel(AP.data),
            int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
            A.blocksize[0], A.blocksize[1], P.blocksize[1])
        cost[0] += mat_mat_complexity(A, AP, incomplete=True) / float(A.nnz)

        # Enforce AP*B = 0
        temp_cost = [0.0]
        Satisfy_Constraints(AP, B, BtBinv, cost=temp_cost)
        cost[0] += temp_cost[0] / float(A.nnz)

        # Frobenius inner-product of (P, AP)
        alpha = newsum / (P.conjugate().multiply(AP)).sum()
        cost[0] += max(P.nnz, AP.nnz) / float(A.nnz)

        # Update the prolongator, T
        T = T + alpha * P
        cost[0] += max(P.nnz, T.nnz) / float(A.nnz)

        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

        # Update residual
        R = R - alpha * AP
        cost[0] += max(R.nnz, AP.nnz) / float(A.nnz)

        # Calculate Frobenius norm of the residual
        resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
        i += 1

    return T
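Across these instrumented routines the convention is that cost[0] counts work units, one unit being one pass over the nonzeros of A; each sparse operation is charged the nonzeros it touches, divided by A.nnz. A minimal illustration of the bookkeeping, independent of pyamg:

import scipy.sparse as sparse

A = sparse.random(100, 100, density=0.05, format='csr')
R = sparse.random(100, 20, density=0.10, format='csr')

cost = [0.0]
R = 2.0 * R                       # touches every stored entry of R once
cost[0] += R.nnz / float(A.nnz)   # charge nnz(R)/nnz(A) work units
print(cost[0])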
Example #11
def block_gauss_seidel(A, x, b, iterations=1, sweep='forward', blocksize=1, Dinv=None):
    """Perform block Gauss-Seidel iteration on the linear system Ax=b

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    x : ndarray
        Approximate solution (length N)
    b : ndarray
        Right-hand side (length N)
    iterations : int
        Number of iterations to perform
    sweep : {'forward','backward','symmetric'}
        Direction of sweep
    Dinv : array
        Array holding block diagonal inverses of A 
        size (N/blocksize, blocksize, blocksize)
    blocksize : int
        Desired dimension of blocks


    Returns
    -------
    Nothing, x will be modified in place.

    Examples
    --------
    >>> ## Use Gauss-Seidel as a Stand-Alone Solver
    >>> from pyamg.relaxation import *
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.linalg import norm
    >>> import numpy
    >>> A = poisson((10,10), format='csr')
    >>> x0 = numpy.zeros((A.shape[0],1))
    >>> b = numpy.ones((A.shape[0],1))
    >>> block_gauss_seidel(A, x0, b, iterations=10, blocksize=4, sweep='symmetric')
    >>> print(norm(b-A*x0))
    0.958333817624
    >>> #
    >>> ## Use Gauss-Seidel as the Multigrid Smoother
    >>> from pyamg import smoothed_aggregation_solver
    >>> sa = smoothed_aggregation_solver(A, B=numpy.ones((A.shape[0],1)),
    ...        coarse_solver='pinv2', max_coarse=50,
    ...        presmoother=('block_gauss_seidel', {'sweep':'symmetric', 'blocksize' : 4}), 
    ...        postsmoother=('block_gauss_seidel', {'sweep':'symmetric', 'blocksize' : 4}))
    >>> x0=numpy.zeros((A.shape[0],1))
    >>> residuals=[]
    >>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != A.shape[0]//blocksize:
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, len(x)//blocksize, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = len(x)//blocksize - 1, -1, -1
    elif sweep == 'symmetric':
        for _ in range(iterations):
            block_gauss_seidel(A, x, b, iterations=1, sweep='forward',
                               blocksize=blocksize, Dinv=Dinv)
            block_gauss_seidel(A, x, b, iterations=1, sweep='backward',
                               blocksize=blocksize, Dinv=Dinv)
        return
    else:
        raise ValueError("valid sweep directions are 'forward', "
                         "'backward', and 'symmetric'")

    for _ in range(iterations):
        amg_core.block_gauss_seidel(A.indptr, A.indices, numpy.ravel(A.data),
                                    x, b, numpy.ravel(Dinv),
                                    row_start, row_stop, row_step, blocksize)
Example #12
def evolution_strength_of_connection(A, B='ones', epsilon=4.0, k=2,
                                     proj_type="l2", block_flag=False,
                                     symmetrize_measure=True):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B='ones', then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        ODE num time steps, step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    block_flag : {boolean}
        If True, use a block D inverse as preconditioner for A during
        weighted-Jacobi

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if isinstance(B, str) and B == 'ones':
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's and have
    # sorted indices
    if (not sparse.isspmatrix_csr(A)):
        csrflag = False
        numPDEs = A.blocksize[0]
        D = A.diagonal()
        # Calculate Dinv*A
        if block_flag:
            Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
            Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
                                     np.arange(Dinv.shape[0] + 1)),
                                     shape=A.shape)
            Dinv_A = (Dinv * A).tocsr()
        else:
            Dinv = np.zeros_like(D)
            mask = (D != 0.0)
            Dinv[mask] = 1.0 / D[mask]
            Dinv[D == 0] = 1.0
            Dinv_A = scale_rows(A, Dinv, copy=True)
        A = A.tocsr()
    else:
        csrflag = True
        numPDEs = 1
        D = A.diagonal()
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)

    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this will be used to scale the time step
    # size for the ODE
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    #      In order to later access columns, we calculate the transpose in
    #      CSR format so that columns will be accessed efficiently
    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0/rho_DinvA)*Dinv_A)
    Atilde = Atilde.T.tocsr()

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(range(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            Atilde = Atilde*Atilde

        JacobiStep = (I - (1.0/rho_DinvA)*Dinv_A).T.tocsr()
        for i in range(ninc):
            Atilde = Atilde*JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare-1):
            Atilde = Atilde*Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an
        #   easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data/data

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(range(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]

        # Use constrained min problem to define strength
        amg_core.evolution_strength_helper(Atilde.data,
                                           Atilde.indptr,
                                           Atilde.indices,
                                           Atilde.shape[0],
                                           np.ravel(np.asarray(B)),
                                           np.ravel(np.asarray(
                                               (D_A * np.conjugate(B)).T)),
                                           np.ravel(np.asarray(BDB)),
                                           BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if symmetrize_measure:
        Atilde = 0.5*(Atilde + Atilde.T)

    if epsilon != np.inf:
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0]*Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks,))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(Atilde.shape[0] // numPDEs,
                                          Atilde.shape[1] // numPDEs))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0/Atilde.data

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
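The nsquare/ninc bookkeeping above evaluates Atilde^k with as much repeated squaring as possible: nsquare = floor(log2(k)) squarings produce the power 2^nsquare, and the remaining ninc = k - 2^nsquare steps are applied one multiply at a time. The same idea on a dense matrix (a sketch, assuming nothing from pyamg):

import numpy as np

def power_by_squaring(M, k):
    """M**k via floor(log2(k)) squarings plus k - 2**floor(log2(k)) multiplies."""
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare
    P = M.copy()
    for _ in range(nsquare):
        P = P @ P        # each squaring doubles the accumulated power
    for _ in range(ninc):
        P = P @ M        # leftover steps when k is not a power of two
    return P

M = np.array([[0.9, 0.1], [0.0, 0.8]])
assert np.allclose(power_by_squaring(M, 6), np.linalg.matrix_power(M, 6))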
Example #13
def setup_FC_block_jacobi(lvl,
                          F_iterations=DEFAULT_NITER,
                          C_iterations=DEFAULT_NITER,
                          iterations=DEFAULT_NITER,
                          omega=1.0,
                          Dinv=None,
                          blocksize=None,
                          withrho=False):
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sp.sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sp.sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        if sp.sparse.isspmatrix_bsr(Dinv):
            blocksize = Dinv.blocksize[1]
        else:
            blocksize = 1

    # Check for compatible dimensions
    if (lvl.A.shape[0] % blocksize) != 0:
        raise ValueError("Blocksize does not divide size of matrix.")
    elif (len(lvl.splitting) * blocksize != lvl.A.shape[0]):
        raise ValueError(
            "Blocksize not compatible with CF-splitting and matrix size.")

    if blocksize == 1:
        # Block Jacobi is equivalent to normal Jacobi
        return setup_FC_jacobi(lvl,
                               iterations=iterations,
                               omega=omega,
                               withrho=withrho)
    else:
        # Get C-points and F-points from splitting
        try:
            Fpts = np.array(np.where(lvl.splitting == 0)[0], dtype='int32')
            Cpts = np.array(np.where(lvl.splitting == 1)[0], dtype='int32')
            lvl.nf = len(Fpts)
            lvl.nc = len(Cpts)
        except AttributeError:
            raise ValueError(
                "CF-splitting array needs to be stored in hierarchy.")

        # Use Block Jacobi
        if Dinv is None:
            Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)
        if withrho:
            omega = omega / rho_block_D_inv_A(lvl.A, Dinv)

        def smoother(A, x, b):
            relaxation.FC_block_jacobi(A,
                                       x,
                                       b,
                                       Cpts=Cpts,
                                       Fpts=Fpts,
                                       iterations=iterations,
                                       F_iterations=F_iterations,
                                       C_iterations=C_iterations,
                                       omega=omega,
                                       Dinv=Dinv,
                                       blocksize=blocksize)

        return smoother
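A hedged usage sketch for the setup routine above. The level object is mocked here with a SimpleNamespace carrying only the A and splitting attributes the function reads; pyamg's real hierarchy levels are richer, and the sketch assumes setup_FC_block_jacobi and the relaxation.FC_block_jacobi kernel shown later in this document are importable:

from types import SimpleNamespace
import numpy as np
from pyamg.gallery import poisson

# Mock level: a BSR matrix plus a block-wise CF-splitting (1 = C, 0 = F).
A = poisson((8, 8), format='csr').tobsr(blocksize=(2, 2))
splitting = np.zeros(A.shape[0] // 2, dtype='int32')
splitting[::2] = 1

lvl = SimpleNamespace(A=A, splitting=splitting)
smoother = setup_FC_block_jacobi(lvl, blocksize=2)

x = np.zeros(A.shape[0])
b = np.ones(A.shape[0])
smoother(A, x, b)  # one application of FC block Jacobi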
Example No. 14
0
def gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter,
                                 tol, weighting='local', Cpt_params=None):
    '''
    Helper function for energy_prolongation_smoother(...).

    Use GMRES to smooth T by solving A T = 0, subject to nullspace
    and sparsity constraints.

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix.  May be nonsymmetric or indefinite; unlike the
        CG-based smoother, GMRES does not require SPD.
    T : {bsr_matrix}
        Tentative prolongator, an NxM sparse matrix (M < N).
        This is the initial guess for the equation A T = 0.
        It is assumed that T B_c = B_f.
    B : {array}
        Near-nullspace modes for the coarse grid, i.e., B_c.
        Has shape (M,k), where k is the number of coarse candidate vectors.
    BtBinv : {array}
        3-dimensional array such that BtBinv[i] = pinv(B_i.H B_i), where
        B_i is B restricted to the neighborhood (in the matrix graph) of
        dof i.
    Sparsity_Pattern : {csr_matrix, bsr_matrix}
        Sparse NxM matrix.
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator.
    maxiter : int
        Maximum number of iterations.
    tol : float
        Residual tolerance for A T = 0.
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning.
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        standard SA prolongation smoothing is carried out.  If True, then
        the dict must contain (1) P_I : P_I.T is the injection matrix for
        the C-points, (2) I_F : an identity matrix with zero rows and
        columns at the C-points, and (3) I_C : the C-point analogue of I_F.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator using GMRES to solve A T = 0, subject to the
        constraint T B_c = B_f, and with no nonzeros outside the sparsity
        pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    '''
        
    # For non-SPD systems, apply GMRES with diagonal preconditioning

    # Preallocate space for new search directions
    AV = bsr_matrix((numpy.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype),
                     Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
                    shape=Sparsity_Pattern.shape)

    # Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
    xtype = scipy.sparse.sputils.upcast(A.dtype, T.dtype, B.dtype)
    Q = []                                                 # Givens Rotations
    V = []                                                 # Krylov Space
    vs = []                                                # vs store the pointers to each column of V.
                                                           #   This saves a considerable amount of time.
    H = numpy.zeros( (maxiter+1, maxiter+1), dtype=xtype)  # Upper Hessenberg matrix, which is then 
                                                           #   converted to upper tri with Givens Rots 


    # GMRES will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = bsr_matrix((Dinv, numpy.arange(Dinv.shape[0]),
                           numpy.arange(Dinv.shape[0]+1)), shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate
        D = numpy.abs(A)*numpy.ones((A.shape[0],1), dtype=A.dtype)
        Dinv = numpy.zeros_like(D)
        Dinv[D != 0] = 1.0 / numpy.abs(D[D != 0])
    else:
        raise ValueError('weighting value is invalid')

    # Calculate initial residual
    #   Equivalent to R = -A*T;    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever 
    #   R is 0 and Sparsity_Pattern is not
    R = bsr_matrix((numpy.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype), 
                    Sparsity_Pattern.indices, Sparsity_Pattern.indptr), 
                    shape=(Sparsity_Pattern.shape) )
    pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr,  A.indices,  numpy.ravel(A.data), 
                                           T.indptr,  T.indices,  numpy.ravel(T.data),
                                           R.indptr,  R.indices,  numpy.ravel(R.data),
                                           T.shape[0]//T.blocksize[0],
                                           T.shape[1]//T.blocksize[1],
                                           A.blocksize[0], A.blocksize[1], T.blocksize[1])
    R.data *= -1.0
 
    # Apply diagonal preconditioner
    if weighting == 'local' or weighting == 'diagonal':
        R = scale_rows(R, Dinv)
    else:
        R = Dinv*R
    
    # Enforce R*B = 0
    Satisfy_Constraints(R, B, BtBinv)

    if R.nnz == 0:
        print("Error in sa_energy_min(..).  Initial R has no nonzeros on "
              "a level.  Returning tentative prolongator.")
        return T
    
    # This is the RHS vector for the problem in the Krylov Space
    normr = numpy.sqrt((R.data.conjugate()*R.data).sum())
    g = numpy.zeros((maxiter+1,), dtype=xtype) 
    g[0] = normr
    
    # First Krylov vector
    # V[0] = r/normr
    if normr > 0.0:
        V.append((1.0/normr)*R)

    #print "Energy Minimization of Prolongator --- Iteration 0 --- r = " + str(normr)
    i = -1
    #vect = numpy.ravel((A*T).data)
    #print "Iteration " + str(i+1) + "   Energy = %1.3e"%numpy.sqrt( (vect.conjugate()*vect).sum() )
    #print "Iteration " + str(i+1) + "   Normr  %1.3e"%normr
    while i < maxiter-1 and normr > tol:
        i = i+1

        # Calculate new search direction
        #   Equivalent to:  AV = A*V;    AV = AV.multiply(Sparsity_Pattern)
        #   with the added constraint that AV has an explicit zero wherever
        #   AV is 0 and Sparsity_Pattern is not
        AV.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr,     A.indices,     numpy.ravel(A.data), 
                                               V[i].indptr,  V[i].indices,  numpy.ravel(V[i].data),
                                               AV.indptr,    AV.indices,    numpy.ravel(AV.data),
                                               T.shape[0]//T.blocksize[0],
                                               T.shape[1]//T.blocksize[1],
                                               A.blocksize[0], A.blocksize[1], T.blocksize[1])
        
        if weighting == 'local' or weighting == 'diagonal':
            AV = scale_rows(AV, Dinv)
        else:
            AV = Dinv*AV
        
        # Enforce AV*B = 0
        Satisfy_Constraints(AV, B, BtBinv)
        V.append(AV.copy())

        # Modified Gram-Schmidt
        for j in range(i+1):
            # Frobenius inner-product
            H[j,i] = (V[j].conjugate().multiply(V[i+1])).sum()
            V[i+1] = V[i+1] - H[j,i]*V[j]

        # Frobenius Norm
        H[i+1,i] = numpy.sqrt( (V[i+1].data.conjugate()*V[i+1].data).sum() )
        
        # Check for breakdown
        if H[i+1,i] != 0.0:
            V[i+1] = (1.0/H[i+1,i])*V[i+1]

        # Apply previous Givens rotations to H
        if i > 0:
            apply_givens(Q, H[:,i], i)
            
        # Calculate and apply next complex-valued Givens Rotation
        if H[i+1, i] != 0:
            h1 = H[i, i]
            h2 = H[i+1, i]
            h1_mag = numpy.abs(h1)
            h2_mag = numpy.abs(h2)
            if h1_mag < h2_mag:
                mu = h1/h2
                tau = numpy.conjugate(mu)/numpy.abs(mu)
            else:    
                mu = h2/h1
                tau = mu/numpy.abs(mu)

            denom = numpy.sqrt(h1_mag**2 + h2_mag**2)
            c = h1_mag/denom
            s = h2_mag*tau/denom
            Qblock = numpy.array([[c, numpy.conjugate(s)], [-s, c]], dtype=xtype)
            Q.append(Qblock)
            
            # Apply Givens Rotation to g, 
            #   the RHS for the linear system in the Krylov Subspace.
            g[i:i+2] = numpy.dot(Qblock, g[i:i+2])
            
            # Apply effect of Givens Rotation to H
            H[i, i] = numpy.dot(Qblock[0, :], H[i:i+2, i])
            H[i+1, i] = 0.0
            
        normr = numpy.abs(g[i+1])
        #print "Iteration " + str(i+1) + "   Normr  %1.3e"%normr

    # End while loop
    

    # Find best update to x in Krylov Space V.  Solve the (i+1) x (i+1) system.
    if i != -1:
        y = scipy.linalg.solve(H[0:i+1,0:i+1], g[0:i+1])
        for j in range(i+1):
            T = T + y[j]*V[j]
    
    #vect = numpy.ravel((A*T).data)
    #print "Final Iteration " + str(i) + "   Energy = %1.3e"%numpy.sqrt( (vect.conjugate()*vect).sum() )

    # Ensure identity at C-pts 
    if Cpt_params[0]:
        T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']

    return T
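The complex Givens rotation constructed inside the loop above can be sanity-checked in isolation. A small sketch, using arbitrary test values for h1 and h2, verifying that the 2x2 block zeroes the subdiagonal entry, which is all GMRES needs from it:

import numpy as np

# Arbitrary complex test values standing in for H[i, i] and H[i+1, i].
h1, h2 = 3.0 + 1.0j, 2.0 - 0.5j
h1_mag, h2_mag = np.abs(h1), np.abs(h2)
if h1_mag < h2_mag:
    mu = h1 / h2
    tau = np.conjugate(mu) / np.abs(mu)
else:
    mu = h2 / h1
    tau = mu / np.abs(mu)

denom = np.sqrt(h1_mag**2 + h2_mag**2)
c = h1_mag / denom
s = h2_mag * tau / denom
Qblock = np.array([[c, np.conjugate(s)], [-s, c]])

# Applied to the column [h1, h2], the rotation annihilates the second entry.
print(Qblock @ np.array([h1, h2]))  # second entry is numerically zero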
Example No. 15
0
def block_jacobi(A, x, b, Dinv=None, blocksize=1, iterations=1, omega=1.0):
    """Perform block Jacobi iteration on the linear system Ax=b

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Sparse NxN matrix
    x : ndarray
        Approximate solution (length N)
    b : ndarray
        Right-hand side (length N)
    Dinv : array
        Array holding block diagonal inverses of A 
        size (N/blocksize, blocksize, blocksize)
    blocksize : int
        Desired dimension of blocks
    iterations : int
        Number of iterations to perform
    omega : scalar
        Damping parameter

    Returns
    -------
    Nothing, x will be modified in place.
   
    Examples
    --------
    >>> ## Use block Jacobi as a Stand-Alone Solver
    >>> from pyamg.relaxation import *
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.linalg import norm
    >>> import numpy
    >>> A = poisson((10,10), format='csr')
    >>> x0 = numpy.zeros((A.shape[0],1))
    >>> b = numpy.ones((A.shape[0],1))
    >>> block_jacobi(A, x0, b, blocksize=4, iterations=10, omega=1.0)
    >>> print('%.12g' % norm(b-A*x0))
    4.66474230129
    >>> #
    >>> ## Use block Jacobi as the Multigrid Smoother
    >>> from pyamg import smoothed_aggregation_solver
    >>> sa = smoothed_aggregation_solver(A, B=numpy.ones((A.shape[0],1)),
    ...        coarse_solver='pinv2', max_coarse=50,
    ...        presmoother=('block_jacobi', {'omega': 4.0/3.0, 'iterations' : 2, 'blocksize' : 4}), 
    ...        postsmoother=('block_jacobi', {'omega': 4.0/3.0, 'iterations' : 2, 'blocksize' : 4}))
    >>> x0=numpy.zeros((A.shape[0],1))
    >>> residuals=[]
    >>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
    """

    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != A.shape[0] // blocksize:
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    sweep = slice(None)
    (row_start, row_stop, row_step) = sweep.indices(A.shape[0] // blocksize)

    if (row_stop - row_start) * row_step <= 0:  # no work to do
        return

    temp = numpy.empty_like(x)

    # Create uniform type, and convert possibly complex scalars to length 1 arrays
    [omega] = type_prep(A.dtype, [omega])

    for _iter in range(iterations):
        amg_core.block_jacobi(A.indptr, A.indices, numpy.ravel(A.data), x, b,
                              numpy.ravel(Dinv), temp, row_start, row_stop,
                              row_step, omega, blocksize)
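When block Jacobi is applied repeatedly with the same matrix, the block-diagonal inverse can be computed once and passed in, skipping the get_block_diag call inside every invocation. A hedged sketch; the import paths shown may vary across pyamg versions:

import numpy as np
from pyamg.gallery import poisson
from pyamg.util.utils import get_block_diag
from pyamg.relaxation.relaxation import block_jacobi

A = poisson((10, 10), format='csr').tobsr(blocksize=(4, 4))
Dinv = get_block_diag(A, blocksize=4, inv_flag=True)  # computed once

x = np.zeros(A.shape[0])
b = np.ones(A.shape[0])
for _ in range(5):
    # Reuse the precomputed Dinv on every call.
    block_jacobi(A, x, b, Dinv=Dinv, blocksize=4,
                 iterations=2, omega=4.0/3.0)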
Example No. 16
0
def block_gauss_seidel(A,
                       x,
                       b,
                       iterations=1,
                       sweep='forward',
                       blocksize=1,
                       Dinv=None):
    """Perform block Gauss-Seidel iteration on the linear system Ax=b

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    x : ndarray
        Approximate solution (length N)
    b : ndarray
        Right-hand side (length N)
    iterations : int
        Number of iterations to perform
    sweep : {'forward','backward','symmetric'}
        Direction of sweep
    Dinv : array
        Array holding block diagonal inverses of A 
        size (N/blocksize, blocksize, blocksize)
    blocksize : int
        Desired dimension of blocks


    Returns
    -------
    Nothing, x will be modified in place.

    Examples
    --------
    >>> ## Use Gauss-Seidel as a Stand-Alone Solver
    >>> from pyamg.relaxation import *
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.linalg import norm
    >>> import numpy
    >>> A = poisson((10,10), format='csr')
    >>> x0 = numpy.zeros((A.shape[0],1))
    >>> b = numpy.ones((A.shape[0],1))
    >>> block_gauss_seidel(A, x0, b, iterations=10, blocksize=4, sweep='symmetric')
    >>> print('%.12g' % norm(b-A*x0))
    0.958333817624
    >>> #
    >>> ## Use Gauss-Seidel as the Multigrid Smoother
    >>> from pyamg import smoothed_aggregation_solver
    >>> sa = smoothed_aggregation_solver(A, B=numpy.ones((A.shape[0],1)),
    ...        coarse_solver='pinv2', max_coarse=50,
    ...        presmoother=('block_gauss_seidel', {'sweep':'symmetric', 'blocksize' : 4}), 
    ...        postsmoother=('block_gauss_seidel', {'sweep':'symmetric', 'blocksize' : 4}))
    >>> x0=numpy.zeros((A.shape[0],1))
    >>> residuals=[]
    >>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != A.shape[0] // blocksize:
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, len(x) // blocksize, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = len(x) // blocksize - 1, -1, -1
    elif sweep == 'symmetric':
        for _iter in range(iterations):
            block_gauss_seidel(A,
                               x,
                               b,
                               iterations=1,
                               sweep='forward',
                               blocksize=blocksize,
                               Dinv=Dinv)
            block_gauss_seidel(A,
                               x,
                               b,
                               iterations=1,
                               sweep='backward',
                               blocksize=blocksize,
                               Dinv=Dinv)
        return
    else:
        raise ValueError(
            "valid sweep directions are 'forward', 'backward', and 'symmetric'"
        )

    for _iter in range(iterations):
        amg_core.block_gauss_seidel(A.indptr,
                                    A.indices, numpy.ravel(A.data), x, b,
                                    numpy.ravel(Dinv), row_start, row_stop,
                                    row_step, blocksize)
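The 'symmetric' branch above is simply a forward sweep followed by a backward sweep with the same Dinv. A hedged check of that equivalence on the Poisson example from the docstring:

import numpy as np
from pyamg.gallery import poisson
from pyamg.relaxation.relaxation import block_gauss_seidel

A = poisson((10, 10), format='csr')
b = np.ones(A.shape[0])

# One symmetric iteration...
x_sym = np.zeros(A.shape[0])
block_gauss_seidel(A, x_sym, b, iterations=1, sweep='symmetric', blocksize=4)

# ...should match an explicit forward sweep followed by a backward sweep.
x_fb = np.zeros(A.shape[0])
block_gauss_seidel(A, x_fb, b, iterations=1, sweep='forward', blocksize=4)
block_gauss_seidel(A, x_fb, b, iterations=1, sweep='backward', blocksize=4)

print(np.allclose(x_sym, x_fb))  # expect True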
Example No. 17
0
def evolution_strength_of_connection(A,
                                     B=None,
                                     epsilon=4.0,
                                     k=2,
                                     proj_type="l2",
                                     weighting='diagonal',
                                     symmetrize_measure=True,
                                     cost=[0]):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B=None, then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        Number of ODE time steps; the step size is assumed to be
        1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the D-inverse 
        used to precondition A before "evolving" delta-functions.  The
        local option is the cheapest.

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                     [-1.0, 8.0,-1.0],
    ...                     [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format; this isn't a deep copy
    if B is None:
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Is matrix A CSR?
    if (not sparse.isspmatrix_csr(A)):
        numPDEs = A.blocksize[0]
        csrflag = False
    else:
        numPDEs = 1
        csrflag = True

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's, have
    # sorted indices and be scaled by D-inverse
    if weighting == 'block':
        Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
        Dinv_A = (Dinv * A).tocsr()
        cost[0] += 1
    elif weighting == 'diagonal':
        D = A.diagonal()
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    elif weighting == 'local':
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    else:
        raise ValueError('Unrecognized weighting for Evolution measure')

    A = A.tocsr()
    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    if weighting == 'diagonal' or weighting == 'block':
        # Get spectral radius of Dinv*A, scales the time step size for the ODE
        rho_DinvA = approximate_spectral_radius(Dinv_A)
        cost[0] += 15  # 15 lanczos iterations to approximate spectral radius
    else:
        # Using local weighting, no need for spectral radius
        rho_DinvA = 1.0

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D = A.diagonal()
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    # We transpose the product, so that we can efficiently access
    # the columns in CSR format.  We want the columns (not rows) because
    # strength is based on the columns of (I - delta_t Dinv A)^k, i.e.,
    # relaxed delta functions

    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare
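    # Example: k = 5 gives nsquare = int(log2(5)) = 2, so two squarings
    # reach Atilde^4, and ninc = 5 - 4 = 1 incremental multiply remains.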

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = Atilde.T.tocsr()
    cost[0] += 1

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        JacobiStep = sparse.csr_matrix(Atilde, copy=True)
        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        for i in range(ninc):
            cost[0] += mat_mat_complexity(Atilde, JacobiStep)
            Atilde = Atilde * JacobiStep

        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()
        cost[0] += Atilde.nnz / float(A.nnz)

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare - 1):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)
        cost[0] += mat_mat_complexity(Atilde, mask, incomplete=True) / float(
            A.nnz)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm and inner product, or the l2-norm and
    # inner product, to solve the constrained min problem.  Using D gives
    # scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an
        #   easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling
        cost[0] += Atilde.shape[0] / float(A.nnz)

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))
        cost[0] += 2.0 * Atilde.nnz / float(A.nnz)

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)
        cost[0] += Atilde.nnz / float(A.nnz)
        if np.iscomplexobj(Atilde.data):
            cost[0] += Atilde.nnz / float(A.nnz)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data / data
        cost[0] += Atilde.nnz / float(A.nnz)

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)
        cost[0] += Atilde.nnz / float(A.nnz)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1
                cost[0] += B.shape[0] / float(A.nnz)

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

        # Use constrained min problem to define strength.
        # This does the same as the NullDim=1 shortcut above, generalized
        # to multiple candidate vectors.
        # Complexity accounts for computing the block inverse, and
        #   hat{z_i} = B_i*x, hat{z_i} .* hat{z_i},
        #   hat{z_i} = hat{z_i} / z_i, and abs(1.0 - hat{z_i}).
        cost[0] += (Atilde.nnz * (3 + NullDim) +
                    (NullDim**3) * dimen) / float(A.nnz)
        amg_core.evolution_strength_helper(
            Atilde.data, Atilde.indptr, Atilde.indices, Atilde.shape[0],
            np.ravel(np.asarray(B)),
            np.ravel(np.asarray((D_A * np.conjugate(B)).T)),
            np.ravel(np.asarray(BDB)), BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        cost[0] += Atilde.nnz / float(A.nnz)
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I
    cost[0] += Atilde.shape[0] / float(A.nnz)

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks, ))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data
    cost[0] += Atilde.nnz / float(A.nnz)

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)
    cost[0] += Atilde.nnz / float(A.nnz)

    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5 * (Atilde + Atilde.T)
        cost[0] += Atilde.nnz / float(A.nnz)

    return Atilde
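In practice this measure is usually selected through a solver's strength option rather than called directly. A hedged sketch using pyamg's smoothed_aggregation_solver interface:

import numpy as np
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson

A = poisson((50, 50), format='csr')

# Select the evolution measure with its drop tolerance and time-step count.
ml = smoothed_aggregation_solver(
    A, strength=('evolution', {'epsilon': 4.0, 'k': 2}))

b = np.ones(A.shape[0])
x = ml.solve(b, tol=1e-8)
print(np.linalg.norm(b - A @ x))  # small residual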
Example No. 18
0
def FC_block_jacobi(A, x, b, Cpts, Fpts, Dinv=None, blocksize=1, iterations=1,
                    F_iterations=1, C_iterations=1, omega=1.0):
    """Perform FC block Jacobi iteration on the linear system Ax=b, that is

        x_f = (1-omega)x_f + omega*Dff^{-1}(b_f - Aff*xf - Afc*xc)
        x_c = (1-omega)x_c + omega*Dff^{-1}(b_c - Acf*xf - Acc*xc)

    where xf is x restricted to F-blocks, and Dff^{-1} the block inverse
    of the block diagonal Dff, and likewise for c subscripts.

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Sparse NxN matrix
    x : ndarray
        Approximate solution (length N)
    b : ndarray
        Right-hand side (length N)
    Cpts : array of ints
        List of C-blocks in A
    Fpts : array of ints
        List of F-blocks in A
    Dinv : array
        Array holding block diagonal inverses of A
        size (N/blocksize, blocksize, blocksize)
    blocksize : int
        Desired dimension of blocks
    iterations : int
        Number of iterations to perform of total FC-cycle
    F_iterations : int
        Number of sweeps of F-relaxation to perform
    C_iterations : int
        Number of sweeps of C-relaxation to perform
    omega : scalar
        Damping parameter

    Returns
    -------
    Nothing, x will be modified in place.

    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != int(A.shape[0]/blocksize):
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    # Create uniform type, convert possibly complex scalars to length 1 arrays
    [omega] = type_prep(A.dtype, [omega])

    # Perform block F-relaxation, then block C-relaxation
    for _iter in range(iterations):
        for Fiter in range(F_iterations):
            amg_core.block_jacobi_indexed(A.indptr, A.indices, np.ravel(A.data),
                                          x, b, np.ravel(Dinv), Fpts, omega,
                                          blocksize)
        for Citer in range(C_iterations):
            amg_core.block_jacobi_indexed(A.indptr, A.indices, np.ravel(A.data),
                                          x, b, np.ravel(Dinv), Cpts, omega,
                                          blocksize)
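A hedged direct-call sketch for FC_block_jacobi itself, assuming it is importable alongside the routines above. Note that Cpts and Fpts index blocks, not scalar rows, and should be int32:

import numpy as np
from pyamg.gallery import poisson

A = poisson((8, 8), format='csr').tobsr(blocksize=(2, 2))
nblocks = A.shape[0] // 2
splitting = np.zeros(nblocks, dtype='int32')
splitting[::2] = 1  # alternate C- and F-blocks

Fpts = np.where(splitting == 0)[0].astype('int32')
Cpts = np.where(splitting == 1)[0].astype('int32')

x = np.zeros(A.shape[0])
b = np.ones(A.shape[0])
# Two outer FC cycles, each doing one F-sweep followed by one C-sweep.
FC_block_jacobi(A, x, b, Cpts=Cpts, Fpts=Fpts, blocksize=2,
                iterations=2, F_iterations=1, C_iterations=1, omega=1.0)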