def evolution_strength_of_connection(A, B=None, epsilon=4.0, k=2,
                                     proj_type="l2", weighting='diagonal',
                                     symmetrize_measure=True, cost=None):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {None, array}
        If B=None, then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance (must be >= 1.0)
    k : integer
        ODE num time steps, step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the D-inverse
        used to precondition A before "evolving" delta-functions.  The
        local option is the cheapest.
    symmetrize_measure : {bool}
        If True, symmetrize the final measure as 0.5*(Atilde + Atilde.T)
    cost : {None, one-element list}
        Work accumulator: cost[0] is incremented by the estimated work,
        in multiples of nnz(A).  A fresh accumulator is created when None.

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                     [-1.0, 8.0,-1.0],
    ...                     [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A, np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # BUGFIX: previously cost=[0] was a mutable default argument, shared
    # across calls, so work units silently accumulated when callers
    # omitted cost.  Use a None sentinel and allocate per call instead.
    if cost is None:
        cost = [0]

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B is None:
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Is matrix A CSR?
    if (not sparse.isspmatrix_csr(A)):
        numPDEs = A.blocksize[0]
        csrflag = False
    else:
        numPDEs = 1
        csrflag = True

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's, have
    # sorted indices and be scaled by D-inverse
    if weighting == 'block':
        # Use a block diagonal inverse of A
        Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
        Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
                                  np.arange(Dinv.shape[0] + 1)),
                                 shape=A.shape)
        Dinv_A = (Dinv * A).tocsr()
        cost[0] += 1
    elif weighting == 'diagonal':
        # Classical pointwise D-inverse; unit value where diag(A) is zero
        D = A.diagonal()
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    elif weighting == 'local':
        # Local weighting: row sums of |A| stand in for the diagonal
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    else:
        raise ValueError('Unrecognized weighting for Evolution measure')

    A = A.tocsr()
    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    if weighting == 'diagonal' or weighting == 'block':
        # Get spectral radius of Dinv*A, scales the time step size for
        # the ODE
        rho_DinvA = approximate_spectral_radius(Dinv_A)
        cost[0] += 15  # 15 lanczos iterations to approximate spectral radius
    else:
        # Using local weighting, no need for spectral radius
        rho_DinvA = 1.0

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D = A.diagonal()
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    # We transpose the product, so that we can efficiently access
    # the columns in CSR format.  We want the columns (not rows) because
    # strength is based on the columns of (I - delta_t Dinv A)^k, i.e.,
    # relaxed delta functions

    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = Atilde.T.tocsr()
    cost[0] += 1

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to
    # the nonzero pattern of A, with the added constraint that row i of
    # Atilde^T retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")
        JacobiStep = csr_matrix(Atilde, copy=True)
        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde
        for i in range(ninc):
            cost[0] += mat_mat_complexity(Atilde, JacobiStep)
            Atilde = Atilde * JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated
        # at start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()
        cost[0] += Atilde.nnz / float(A.nnz)

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been
            # eliminated at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare - 1):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices,
                                         mask.data, dimen)
        cost[0] += mat_mat_complexity(Atilde, mask, incomplete=True) / \
            float(A.nnz)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and
    # inner-prod to solve the constrained min problem.  Using D gives scale
    # invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut
    # is mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a
        # vector.  Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        # Its not clear what to do where B == 0.  This is an
        # an easy programming solution, that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling
        cost[0] += Atilde.shape[0] / float(A.nnz)

        # Calculate best approximation, z_tilde, in span(B)
        # Importantly, scale_rows and scale_columns leave zero entries
        # in the matrix.  For previous implementations this was useful
        # because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))
        cost[0] += 2.0 * Atilde.nnz / float(A.nnz)

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)
        cost[0] += Atilde.nnz / float(A.nnz)
        # BUGFIX: was "Atilde.dtype is 'complex'", which compares a dtype
        # object to a str by identity and is always False, so the extra
        # work for complex arithmetic was never counted.
        if np.issubdtype(Atilde.dtype, np.complexfloating):
            cost[0] += Atilde.nnz / float(A.nnz)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data / data
        cost[0] += Atilde.nnz / float(A.nnz)

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)
        cost[0] += Atilde.nnz / float(A.nnz)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also
        # scale by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                     np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1
                cost[0] += B.shape[0] / float(A.nnz)

        # Choose tolerance for dropping "numerically zero" values later
        # BUGFIX: np.float and np.longfloat were removed in NumPy >= 1.24;
        # use np.float64 and np.longdouble (identical values).
        t = Atilde.dtype.char
        eps = np.finfo(np.float64).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

        # Use constrained min problem to define strength.
        # This function is doing similar to NullDim=1 with more bad guys.
        # Complexity accounts for computing the block inverse, and
        #   hat{z_i} = B_i*x, hat{z_i} .* hat{z_i},
        #   hat{z_i} = hat{z_i} / z_i, and abs(1.0 - hat{z_i}).
        cost[0] += (Atilde.nnz * (3 + NullDim) +
                    (NullDim**3) * dimen) / float(A.nnz)
        amg_core.evolution_strength_helper(
            Atilde.data, Atilde.indptr, Atilde.indices, Atilde.shape[0],
            np.ravel(np.asarray(B)),
            np.ravel(np.asarray((D_A * np.conjugate(B)).T)),
            np.ravel(np.asarray(BDB)), BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the
    # complex part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        cost[0] += Atilde.nnz / float(A.nnz)
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I
    cost[0] += Atilde.shape[0] / float(A.nnz)

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    # i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks, ))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data
    cost[0] += Atilde.nnz / float(A.nnz)

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)
    cost[0] += Atilde.nnz / float(A.nnz)

    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5 * (Atilde + Atilde.T)
        cost[0] += Atilde.nnz / float(A.nnz)

    return Atilde
def evolution_strength_of_connection(A, B='ones', epsilon=4.0, k=2,
                                     proj_type="l2", block_flag=False,
                                     symmetrize_measure=True):
    """
    Construct strength of connection matrix using an Evolution-based measure

    NOTE(review): this module defines evolution_strength_of_connection
    twice; this later definition shadows the earlier (cost-tracking)
    variant at import time -- confirm which is intended.

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B='ones', then the near nullspace vector used is all ones.  If B
        is an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance (must be >= 1.0)
    k : integer
        ODE num time steps, step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    block_flag : {boolean}
        If True, use a block D inverse as preconditioner for A during
        weighted-Jacobi
    symmetrize_measure : {bool}
        If True, symmetrize the measure as 0.5*(Atilde + Atilde.T) before
        the drop tolerance is applied

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                     [-1.0, 8.0,-1.0],
    ...                     [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A, np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    # BUGFIX: guard with isinstance -- "B == 'ones'" triggers an
    # elementwise ndarray-vs-str comparison (FutureWarning / fragile truth
    # value) when B is an array of near-nullspace vectors.
    if isinstance(B, str) and B == 'ones':
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's and
    # have sorted indices
    if (not sparse.isspmatrix_csr(A)):
        csrflag = False
        numPDEs = A.blocksize[0]
        D = A.diagonal()
        # Calculate Dinv*A
        if block_flag:
            Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
            Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
                                      np.arange(Dinv.shape[0] + 1)),
                                     shape=A.shape)
            Dinv_A = (Dinv * A).tocsr()
        else:
            Dinv = np.zeros_like(D)
            mask = (D != 0.0)
            Dinv[mask] = 1.0 / D[mask]
            Dinv[D == 0] = 1.0
            Dinv_A = scale_rows(A, Dinv, copy=True)
        A = A.tocsr()
    else:
        csrflag = True
        numPDEs = 1
        D = A.diagonal()
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)

    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this will be used to scale the time
    # step size for the ODE
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    # In order to later access columns, we calculate the transpose in
    # CSR format so that columns will be accessed efficiently

    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0/rho_DinvA)*Dinv_A)
    Atilde = Atilde.T.tocsr()

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to
    # the nonzero pattern of A, with the added constraint that row i of
    # Atilde^T retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            Atilde = Atilde*Atilde

        JacobiStep = (I - (1.0/rho_DinvA)*Dinv_A).T.tocsr()
        for i in range(ninc):
            Atilde = Atilde*JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated
        # at start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been
            # eliminated at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare-1):
            Atilde = Atilde*Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices,
                                         mask.data, dimen)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and
    # inner-prod to solve the constrained min problem.  Using D gives scale
    # invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut
    # is mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a
        # vector.  Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        # Its not clear what to do where B == 0.  This is an
        # an easy programming solution, that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling

        # Calculate best approximation, z_tilde, in span(B)
        # Importantly, scale_rows and scale_columns leave zero entries
        # in the matrix.  For previous implementations this was useful
        # because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data/data

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also
        # scale by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                     np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1

        # Choose tolerance for dropping "numerically zero" values later
        # BUGFIX: np.float and np.longfloat were removed in NumPy >= 1.24;
        # use np.float64 and np.longdouble (identical values).
        t = Atilde.dtype.char
        eps = np.finfo(np.float64).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]

        # Use constrained min problem to define strength
        amg_core.evolution_strength_helper(
            Atilde.data, Atilde.indptr, Atilde.indices, Atilde.shape[0],
            np.ravel(np.asarray(B)),
            np.ravel(np.asarray((D_A * np.conjugate(B)).T)),
            np.ravel(np.asarray(BDB)), BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the
    # complex part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance (symmetrize first, matching the original order
    # of operations in this variant)
    if symmetrize_measure:
        Atilde = 0.5*(Atilde + Atilde.T)

    if epsilon != np.inf:
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    # i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0]*Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks,))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        # BUGFIX: use floor division -- "/" is true division in Python 3
        # and yields float shape entries, which scipy rejects.
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(Atilde.shape[0] // numPDEs,
                                          Atilde.shape[1] // numPDEs))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0/Atilde.data

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde