def energy_prolongation_smoother(A, T, Atilde, B, Bf, Cpt_params,
                                 krylov='cg', maxiter=4, tol=1e-8,
                                 degree=1, weighting='local',
                                 prefilter={}, postfilter={},
                                 cost=[0.0]):
    """Minimize the energy of the coarse basis functions (columns of T).

    Both root-node and non-root-node style prolongation smoothing are
    available; see the Cpt_params description below.

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N)
    Atilde : {csr_matrix}
        Strength of connection matrix
    B : {array}
        Near-nullspace modes for the coarse grid.  Has shape (M,k) where
        k is the number of coarse candidate vectors.
    Bf : {array}
        Near-nullspace modes for the fine grid.  Has shape (N,k) where
        k is the number of coarse candidate vectors.
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        standard SA prolongation smoothing is carried out.  If True, then
        root-node style prolongation smoothing is carried out.  The dict
        must contain the parameters (1) P_I : P_I.T is the injection
        matrix for the Cpts, (2) I_F : an identity matrix for only the
        F-points (i.e. I, but with zero rows and columns for C-points),
        and (3) I_C : the C-point analogue of I_F.  See Notes below for
        more information.
    krylov : {string}
        'cg' : for SPD systems.  Solve A T = 0 in a constraint space with CG
        'cgnr' : for nonsymmetric and/or indefinite systems.
            Solve A T = 0 in a constraint space with CGNR
        'gmres' : for nonsymmetric and/or indefinite systems.
            Solve A T = 0 in a constraint space with GMRES
    maxiter : integer
        Number of energy minimization steps to apply to the prolongator
    tol : {scalar}
        Minimization tolerance
    degree : {int}
        Generate the sparsity pattern for P based on (Atilde^degree T)
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
        'local' : uses a local row-wise weight based on the Gershgorin
            estimate.  Avoids any potential under-damping due to
            inaccurate spectral-radius estimates.
        'block' : if A is a BSR matrix, use a block-diagonal inverse of A
        'diagonal' : use the inverse of the diagonal of A
    prefilter : {dictionary} : Default {}
        Filters elements by row in the sparsity pattern for P to reduce
        operator and setup complexity.  If None or an empty dictionary,
        no dropping is done.  If prefilter has key 'k', then the largest
        'k' entries are kept in each row.  If prefilter has key 'theta',
        all entries such that P[i,j] < prefilter['theta']*max(abs(P[i,:]))
        are dropped.  If both 'k' and 'theta' are present, they are used
        in conjunction, with the union of their patterns kept.
    postfilter : {dictionary} : Default {}
        Filters elements by row in the smoothed P to reduce operator
        complexity.  Only supported when using the rootnode_solver.  If
        None or an empty dictionary, no dropping is done.  If postfilter
        has key 'k', then the largest 'k' entries are kept in each row.
        If postfilter has key 'theta', all entries such that
        P[i,j] < postfilter['theta']*max(abs(P[i,:])) are dropped.  If
        both 'k' and 'theta' are present, they are used in conjunction,
        with the union of their patterns kept.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator

    Notes
    -----
    Only 'diagonal' weighting is supported for the CGNR method, because
    we are working with A^* A and not A.

    When Cpt_params[0] == True, root-node style prolongation smoothing is
    used to minimize the energy of the columns of T.  Essentially, an
    identity block is maintained in T, corresponding to injection from
    the coarse grid to the fine-grid root-nodes.  See [2] for more
    details, and see util.utils.get_Cpt_params for the helper function
    that generates Cpt_params.

    If Cpt_params[0] == False, the energy of the columns of T is still
    minimized, but without maintaining the identity block.

    Examples
    --------
    >>> from pyamg.aggregation import energy_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> print(T.todense())
    [[ 1.  0.]
     [ 1.  0.]
     [ 1.  0.]
     [ 0.  1.]
     [ 0.  1.]
     [ 0.  1.]]
    >>> A = poisson((6,),format='csr')
    >>> B = np.ones((2,1),dtype=float)
    >>> P = energy_prolongation_smoother(A,T,A,B, None, (False,{}))
    >>> print(P.todense())
    [[ 1.          0.        ]
     [ 1.          0.        ]
     [ 0.66666667  0.33333333]
     [ 0.33333333  0.66666667]
     [ 0.          1.        ]
     [ 0.          1.        ]]

    References
    ----------
    .. [1] Jan Mandel, Marian Brezina, and Petr Vanek,
       "Energy Optimization of Algebraic Multigrid Bases",
       Computing 62, 205-228, 1999
       http://dx.doi.org/10.1007/s006070050022
    .. [2] Olson, L. and Schroder, J. and Tuminaro, R.,
       "A general interpolation strategy for algebraic multigrid using
       energy minimization", SIAM Journal on Scientific Computing (SISC),
       vol. 33, pp. 966--991, 2011.

    """
    # Test inputs
    if maxiter < 0:
        raise ValueError('maxiter must be >= 0')
    if tol > 1:
        raise ValueError('tol must be <= 1')

    if sparse.isspmatrix_csr(A):
        A = A.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(A):
        pass
    else:
        raise TypeError("A must be csr_matrix or bsr_matrix")

    if sparse.isspmatrix_csr(T):
        T = T.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(T):
        pass
    else:
        raise TypeError("T must be csr_matrix or bsr_matrix")

    if T.blocksize[0] != A.blocksize[0]:
        raise ValueError("T row-blocksize should be the same as A blocksize")

    if B.shape[0] != T.shape[1]:
        raise ValueError("B is the set of candidate vectors for the coarse "
                         "grid; num_rows(B) must equal num_cols(T)")

    if min(T.nnz, A.nnz) == 0:
        return T

    if not sparse.isspmatrix_csr(Atilde):
        raise TypeError("Atilde must be csr_matrix")

    if ('theta' in prefilter) and (prefilter['theta'] == 0):
        prefilter.pop('theta', None)

    if ('theta' in postfilter) and (postfilter['theta'] == 0):
        postfilter.pop('theta', None)

    # Preprocess Atilde, the strength matrix
    if Atilde is None:
        Atilde = sparse.csr_matrix(
            (np.ones(len(A.indices)), A.indices.copy(), A.indptr.copy()),
            shape=(A.shape[0] // A.blocksize[0],
                   A.shape[1] // A.blocksize[1]))

    # If A or T has no nonzeros, return the tentative prolongator unchanged
    if min(T.nnz, A.nnz) == 0:
        return T

    # Expand the allowed sparsity pattern for P through multiplication by
    # Atilde
    if degree > 0:

        # Construct Sparsity_Pattern by multiplying with Atilde
        T.sort_indices()
        shape = (int(T.shape[0] / T.blocksize[0]),
                 int(T.shape[1] / T.blocksize[1]))
        Sparsity_Pattern = sparse.csr_matrix(
            (np.ones(T.indices.shape), T.indices, T.indptr), shape=shape)

        AtildeCopy = Atilde.copy()
        for i in range(degree):
            cost[0] += mat_mat_complexity(AtildeCopy,
                                          Sparsity_Pattern) / float(A.nnz)
            Sparsity_Pattern = AtildeCopy * Sparsity_Pattern

        # Optional filtering of the sparsity pattern before smoothing
        #   - Complexity: two passes through T for the theta-filter, a sort
        #     on each row for the k-filter, and a matrix add if both are used.
        if 'theta' in prefilter and 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
                                                prefilter['theta'],
                                                cost=temp_cost)
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)

            # Union of the two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
            cost[0] += Sparsity_Pattern.nnz / float(A.nnz)
        elif 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
                                                  prefilter['theta'],
                                                  cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        # UnAmal returns a BSR matrix with 1's in the nonzero locations
        Sparsity_Pattern = UnAmal(Sparsity_Pattern,
                                  T.blocksize[0], T.blocksize[1])
        Sparsity_Pattern.sort_indices()

    else:
        # If degree is 0, just copy T for the sparsity pattern
        Sparsity_Pattern = T.copy()

        if 'theta' in prefilter and 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
                                                prefilter['theta'],
                                                cost=temp_cost)
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)

            # Union of the two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
            cost[0] += Sparsity_Pattern.nnz / float(A.nnz)
        elif 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
                                                  prefilter['theta'],
                                                  cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        Sparsity_Pattern.data[:] = 1.0
        Sparsity_Pattern.sort_indices()

    # If using root nodes, enforce identity at C-points
    if Cpt_params[0]:
        Sparsity_Pattern = Cpt_params[1]['I_F'] * Sparsity_Pattern
        Sparsity_Pattern = Cpt_params[1]['P_I'] + Sparsity_Pattern
        cost[0] += Sparsity_Pattern.nnz / float(A.nnz)

    # Construct array of inv(Bi'Bi), where Bi is B restricted to row i's
    # sparsity pattern in Sparsity_Pattern.  This array is used multiple
    # times in Satisfy_Constraints(...).
    temp_cost = [0.0]
    BtBinv = compute_BtBinv(B, Sparsity_Pattern, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    # If using root nodes and B has more columns than A's blocksize, then
    # T must be updated so that T*B = Bf.  Note, if this is a 'secondpass'
    # after dropping entries in P, then we must re-enforce the constraints.
    if (Cpt_params[0] and (B.shape[1] > A.blocksize[0])) or \
            ('secondpass' in postfilter):
        temp_cost = [0.0]
        T = filter_operator(T, Sparsity_Pattern, B, Bf, BtBinv,
                            cost=temp_cost)
        cost[0] += temp_cost[0] / float(A.nnz)
        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

    # Iteratively minimize the energy of T subject to the constraints of
    # Sparsity_Pattern and maintaining T's effect on B, i.e.
    # T*B = (T + Update)*B, i.e. Update*B = 0
    if krylov == 'cg':
        T = cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                      maxiter, tol, weighting, Cpt_params,
                                      cost)
    elif krylov == 'cgnr':
        T = cgnr_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                        maxiter, tol, weighting, Cpt_params,
                                        cost)
    elif krylov == 'gmres':
        T = gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                         maxiter, tol, weighting, Cpt_params,
                                         cost)

    T.eliminate_zeros()

    # Filter entries in P, only in the root-node case,
    # i.e., Cpt_params[0] == True
    if (len(postfilter) == 0) or ('secondpass' in postfilter) or \
            (Cpt_params[0] is False):
        return T
    else:
        if 'theta' in postfilter and 'k' in postfilter:
            temp_cost = [0.0]
            T_theta = filter_matrix_rows(T, postfilter['theta'],
                                         cost=temp_cost)
            T_k = truncate_rows(T, postfilter['k'], cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)

            # Union of the two sparsity patterns
            T_theta.data[:] = 1.0
            T_k.data[:] = 1.0
            T_filter = T_theta + T_k
            T_filter.data[:] = 1.0
            T_filter = T.multiply(T_filter)
        elif 'k' in postfilter:
            temp_cost = [0.0]
            T_filter = truncate_rows(T, postfilter['k'], cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in postfilter:
            temp_cost = [0.0]
            T_filter = filter_matrix_rows(T, postfilter['theta'],
                                          cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        else:
            raise ValueError("Unrecognized postfilter option")

        # Re-smooth T_filter and re-fit the modes B into the span.
        # Note, we set 'secondpass', because this is the second
        # filtering pass.
        T = energy_prolongation_smoother(A, T_filter, Atilde, B, Bf,
                                         Cpt_params, krylov=krylov,
                                         maxiter=1, tol=1e-8, degree=0,
                                         weighting=weighting, prefilter={},
                                         postfilter={'secondpass': True},
                                         cost=cost)

    return T
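
# ----------------------------------------------------------------------------
# Illustrative note (not part of the library API): the constraint maintained
# by the Krylov smoothing passes above is Update*B = 0, enforced row by row.
# For row i of an update U with column pattern J and Bi = B[J, :], the
# projection applied is
#
#     U[i, J] <- U[i, J] - U[i, J] * Bi * inv(Bi.T * Bi) * Bi.T
#
# which is why BtBinv (the per-row inv(Bi'Bi) factors) is precomputed by
# compute_BtBinv above.  The helper below is a plain NumPy/CSR sketch of that
# projection, assuming scalar (1x1 block) unknowns; the library performs the
# equivalent operation in compiled code for BSR matrices (Satisfy_Constraints).
# The helper name is hypothetical and exists only for illustration.
def _satisfy_constraints_sketch(U, B, BtBinv):
    """Project each row of CSR matrix U so that U*B = 0 (illustration only)."""
    for i in range(U.shape[0]):
        start, stop = U.indptr[i], U.indptr[i + 1]
        if start == stop:
            continue                          # empty row, nothing to project
        cols = U.indices[start:stop]
        Bi = B[cols, :]                       # B restricted to row i's pattern
        Ui = U.data[start:stop]
        # Subtract the component of row i that acts on the candidate vectors
        U.data[start:stop] = Ui - Bi.dot(BtBinv[i].dot(Bi.T.dot(Ui)))
    return U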
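
# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): mirrors the docstring example and
# additionally exercises the `degree` and `prefilter` options.  It assumes
# pyamg's gallery module is importable; the parameter choices (degree=2,
# prefilter={'k': 3}) are arbitrary illustrations, not recommended defaults.
if __name__ == '__main__':
    import numpy as np
    from scipy.sparse import coo_matrix
    from pyamg.gallery import poisson

    # Piecewise-constant tentative prolongator on a small 1D Poisson problem
    data = np.ones((6,))
    row = np.arange(0, 6)
    col = np.kron([0, 1], np.ones((3,))).astype(int)
    T = coo_matrix((data, (row, col)), shape=(6, 2)).tocsr()

    A = poisson((6,), format='csr')
    B = np.ones((2, 1), dtype=float)

    # Smooth T on a pattern expanded by Atilde^2 (here Atilde = A), keeping
    # at most the 3 largest pattern entries per row before smoothing
    P = energy_prolongation_smoother(A, T, A, B, None, (False, {}),
                                     degree=2, prefilter={'k': 3})
    print(P.todense())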