Example 1
def setup_block_jacobi(lvl, iterations=DEFAULT_NITER, omega=1.0, Dinv=None,
                       blocksize=None, withrho=True):
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        blocksize = Dinv.shape[1]

    if blocksize == 1:
        # Block Jacobi is equivalent to normal Jacobi
        return setup_jacobi(lvl, iterations=iterations, omega=omega,
                            withrho=withrho)
    else:
        # Use Block Jacobi
        if Dinv is None:
            Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)
        if withrho:
            omega = omega/rho_block_D_inv_A(lvl.A, Dinv)

        def smoother(A, x, b):
            relaxation.block_jacobi(A, x, b, iterations=iterations,
                                    omega=omega, Dinv=Dinv,
                                    blocksize=blocksize)
        return smoother
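
A minimal usage sketch of the relaxation call behind this setup routine; it assumes pyamg is installed (poisson and block_jacobi are pyamg helpers), and the matrix here is illustrative:

import numpy as np
from pyamg.gallery import poisson
from pyamg.relaxation.relaxation import block_jacobi

# Build a small BSR Poisson system and apply two weighted block Jacobi
# sweeps, which is what the smoother returned above does per invocation.
A = poisson((8, 8), format='csr').tobsr(blocksize=(2, 2))
x = np.zeros(A.shape[0])
b = np.ones(A.shape[0])
block_jacobi(A, x, b, iterations=2, omega=1.0, blocksize=2)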
Example 2
    def _slice(data, obs_selector=None, vars_selector=None):
        """
        Slice date using any selector that the AnnData object
        supprots for slicing.  If selector is None, will not slice
        on that axis.

        This method exists to optimize filtering/slicing sparse data that has
        access patterns which impact slicing performance.

        https://docs.scipy.org/doc/scipy/reference/sparse.html
        """
        prefer_row_access = (sparse.isspmatrix_csr(data._X)
                             or sparse.isspmatrix_lil(data._X)
                             or sparse.isspmatrix_bsr(data._X))
        if prefer_row_access:
            # Row-major slicing
            if obs_selector is not None:
                data = data[obs_selector, :]
            if vars_selector is not None:
                data = data[:, vars_selector]
        else:
            # Col-major slicing
            if vars_selector is not None:
                data = data[:, vars_selector]
            if obs_selector is not None:
                data = data[obs_selector, :]

        return data
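
A self-contained sketch of why the access-pattern check matters: on CSR data, applying the row selector first keeps slicing cheap. The matrix and selectors below are illustrative, scipy-only stand-ins:

import numpy as np
import scipy.sparse as sparse

# CSR stores rows contiguously, so the helper above slices rows first.
X = sparse.random(1000, 500, density=0.01, format='csr')
rows = np.arange(0, 1000, 2)
cols = np.arange(0, 500, 5)
fast = X[rows, :][:, cols]      # row-major order, as in the CSR branch
slow = X[:, cols][rows, :]      # column-first order on a CSR matrix
assert (fast - slow).nnz == 0   # same result either way, different cost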
Example 3
def setup_block_gauss_seidel(lvl,
                             iterations=DEFAULT_NITER,
                             sweep=DEFAULT_SWEEP,
                             Dinv=None,
                             blocksize=None):
    """Set up block Gauss-Seidel."""
    # Determine Blocksize
    if blocksize is None and Dinv is None:
        if sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]
    elif blocksize is None:
        blocksize = Dinv.shape[1]

    if blocksize == 1:
        # Block GS is equivalent to normal GS
        return setup_gauss_seidel(lvl, iterations=iterations, sweep=sweep)

    # Use Block GS
    if Dinv is None:
        Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)

    def smoother(A, x, b):
        relaxation.block_gauss_seidel(A,
                                      x,
                                      b,
                                      iterations=iterations,
                                      Dinv=Dinv,
                                      blocksize=blocksize,
                                      sweep=sweep)

    return smoother
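
For reference, a short sketch of the block-diagonal extraction both block smoothers rely on; it assumes pyamg is installed (get_block_diag lives in pyamg.util.utils):

from pyamg.gallery import poisson
from pyamg.util.utils import get_block_diag

# Invert each 2x2 diagonal block of a BSR Poisson matrix; the result is
# the Dinv array handed to relaxation.block_gauss_seidel above.
A = poisson((6, 6), format='csr').tobsr(blocksize=(2, 2))
Dinv = get_block_diag(A, blocksize=2, inv_flag=True)
print(Dinv.shape)  # (18, 2, 2): one inverted block per block row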
Example 4
def add_sparse_convolution(self, name, sp_pattern,
                           use_sp_data=True,
                           kernelsize=1,
                           stride=1,
                           pad=0,
                           biasterm=False,
                           weight_filler={},
                           bias_filler={},
                           blobs_lr=[1.0, 2.0],
                           weight_decays=[1.0, 1.0],
                           weights=None):
    from scipy import sparse
    assert sparse.isspmatrix(sp_pattern)
    num_output = sp_pattern.shape[0]  # sparsity pattern is given as input

    cp = gpudm.ConvolutionParameter()
    cp.set_kernel_h(kernelsize)
    cp.set_kernel_w(kernelsize)
    cp.set_num_output(num_output)
    assert stride > 0, 'error: stride is 0 for ' + name
    cp.set_stride_h(stride)
    cp.set_stride_w(stride)
    cp.set_pad_h(pad)
    cp.set_pad_w(pad)
    cp.set_bias_term(biasterm)
    self.set_filler_params(cp.mutable_weight_filler(), weight_filler)
    self.set_filler_params(cp.mutable_bias_filler(), bias_filler)
    lp = gpudm.LayerParameter()
    lp.set_allocated_convolution_param(cp)
    cp.this.disown()  # otherwise it would be freed twice

    def arrToBlob(arr):
        # Quick helper: copy a numpy array into a flat gpudm blob.
        bb = gpudm.BlobFloat(1, 1, 1, arr.size)
        bb.mutable_to_numpy_ref().view(arr.dtype)[:] = arr.ravel()
        return bb

    if sparse.isspmatrix_csr(sp_pattern):
        sparsity_args = (sp_pattern.nnz,
                         arrToBlob(sp_pattern.indptr), arrToBlob(sp_pattern.indices),
                         arrToBlob(sp_pattern.data) if use_sp_data else None)
        self.add_layer(name, gpudm.CSR_SparseConvolutionLayerFloat, lp)
    elif sparse.isspmatrix_bsr(sp_pattern):
        br, bc = sp_pattern.blocksize
        assert br == bc, "error: not implemented for non-square blocks"
        sparsity_args = (sp_pattern.nnz // (br * bc), br,
                         arrToBlob(sp_pattern.indptr), arrToBlob(sp_pattern.indices),
                         arrToBlob(sp_pattern.data) if use_sp_data else None)
        self.add_layer(name, gpudm.BSR_SparseConvolutionLayerFloat, lp)
    else:
        assert False, "This sparse matrix type is not implemented"

    # Define the sparsity pattern now
    self.layers[-1][1].SetSparsityPattern(*sparsity_args)

    self.layers[-1][1].blobs_lr = blobs_lr
    self.layers[-1][1].weight_decays = weight_decays
    if weights:
        self.set_parameters({name: weights}, verbose=0)
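
A scipy-only sketch of the BSR bookkeeping above: nnz counts scalar entries, so dividing by the block area recovers the number of stored blocks (the matrix below is illustrative):

import scipy.sparse as sparse

M = sparse.random(8, 8, density=0.5, format='csr').tobsr(blocksize=(2, 2))
br, bc = M.blocksize
# nnz counts individual scalar entries; nnz // (br*bc) counts blocks,
# which equals the length of the BSR indices array.
assert M.nnz // (br * bc) == len(M.indices)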
Example 5
    def test_loading_and_storing_empty_containers(self):
        filename = make_temp_dir('empty_containers.hdf5')
        traj = Trajectory(filename=filename, add_time=True)

        # traj.f_add_parameter('empty.dict', {})
        # traj.f_add_parameter('empty.list', [])
        traj.f_add_parameter(ArrayParameter, 'empty.tuple', ())
        traj.f_add_parameter(ArrayParameter, 'empty.array',
                             np.array([], dtype=float))

        spsparse_csc = spsp.csc_matrix((2, 10))
        spsparse_csr = spsp.csr_matrix((6660, 660))
        spsparse_bsr = spsp.bsr_matrix((3330, 2220))
        spsparse_dia = spsp.dia_matrix((1230, 1230))

        traj.f_add_parameter(SparseParameter, 'empty.csc', spsparse_csc)
        traj.f_add_parameter(SparseParameter, 'empty.csr', spsparse_csr)
        traj.f_add_parameter(SparseParameter, 'empty.bsr', spsparse_bsr)
        traj.f_add_parameter(SparseParameter, 'empty.dia', spsparse_dia)

        traj.f_add_result(SparseResult,
                          'empty.all',
                          dict={},
                          list=[],
                          series=pd.Series(),
                          frame=pd.DataFrame(),
                          panel=pd.Panel(),
                          **traj.par.f_to_dict(short_names=True,
                                               fast_access=True))

        traj.f_store()

        newtraj = load_trajectory(index=-1, filename=filename)

        newtraj.f_load(load_data=2)

        epg = newtraj.par.empty
        self.assertTrue(type(epg.tuple) is tuple)
        self.assertTrue(len(epg.tuple) == 0)

        self.assertTrue(type(epg.array) is np.ndarray)
        self.assertTrue(epg.array.size == 0)

        self.assertTrue(spsp.isspmatrix_csr(epg.csr))
        self.assertTrue(epg.csr.size == 0)

        self.assertTrue(spsp.isspmatrix_csc(epg.csc))
        self.assertTrue(epg.csc.size == 0)

        self.assertTrue(spsp.isspmatrix_bsr(epg.bsr))
        self.assertTrue(epg.bsr.size == 0)

        self.assertTrue(spsp.isspmatrix_dia(epg.dia))
        self.assertTrue(epg.dia.size == 0)

        self.compare_trajectories(traj, newtraj)
Example 6
def sparse_matrix_report(m):
    print(repr(m))
    print('Number of non-zeros  :', m.nnz)
    print('Sparsity             :', 1 - m.nnz / (m.shape[0] * m.shape[1]))

    if isspmatrix_csr(m) or isspmatrix_csc(m):
        print('data length          : {} ({})'.format(len(m.data),
                                                      m.data.dtype))
        print('indptr length        : {} ({})'.format(len(m.indptr),
                                                      m.indptr.dtype))
        print('indices length       : {} ({})'.format(len(m.indices),
                                                      m.indices.dtype))
        print('Size                 :',
              size(m.data.nbytes + m.indptr.nbytes + m.indices.nbytes))
        print('10 x 10 preview:')
        print(m[:10, :10].toarray())
    elif isspmatrix_bsr(m):
        print('data length          : {} ({})'.format(len(m.data),
                                                      m.data.dtype))
        print('indptr length        : {} ({})'.format(len(m.indptr),
                                                      m.indptr.dtype))
        print('indices length       : {} ({})'.format(len(m.indices),
                                                      m.indices.dtype))
        print('blocksize            : {}'.format(m.blocksize))
        print('Size                 :',
              size(m.data.nbytes + m.indptr.nbytes + m.indices.nbytes))
        print('preview:')
        print(m)
    elif isspmatrix_coo(m):
        print('data length          : {} ({})'.format(len(m.data),
                                                      m.data.dtype))
        print('row length           : {} ({})'.format(len(m.row), m.row.dtype))
        print('col length           : {} ({})'.format(len(m.col), m.col.dtype))
        print('Size                 :',
              size(m.data.nbytes + m.row.nbytes + m.col.nbytes))
        print('preview:')
        print(m)
    elif isspmatrix_dok(m):
        print('Size                 :', size(sys.getsizeof(m)))
        print('10 x 10 preview:')
        print(m[:10, :10].toarray())
    elif isspmatrix_dia(m):
        print('data length          : {} ({})'.format(len(m.data),
                                                      m.data.dtype))
        print('Offsets              : {} ({})'.format(len(m.offsets),
                                                      m.offsets.dtype))
        print('Size                 :', size(m.data.nbytes + m.offsets.nbytes))
        print('(no preview)')
    elif isspmatrix_lil(m):
        print('data length          : {} ({})'.format(len(m.data),
                                                      m.data.dtype))
        print('rows                 : {} ({})'.format(len(m.rows),
                                                      m.rows.dtype))
        print('Size                 :', size(m.data.nbytes + m.rows.nbytes))
        print('(no preview)')
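
A self-contained sketch of the CSR byte accounting used in the report; size() above is assumed to be a human-readable byte formatter (e.g. from hurry.filesize):

import scipy.sparse as sparse

m = sparse.random(100, 100, density=0.05, format='csr')
# A CSR matrix is three arrays; its footprint is the sum of their bytes.
nbytes = m.data.nbytes + m.indptr.nbytes + m.indices.nbytes
print('nnz      :', m.nnz)
print('sparsity :', 1 - m.nnz / (m.shape[0] * m.shape[1]))
print('bytes    :', nbytes)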
Example 7
def Satisfy_Constraints(U, B, BtBinv, cost=[0.0]):
    """U is the prolongator update.
       Project out components of U such that U*B = 0

    Parameters
    ----------
    U : {bsr_matrix}
        m x n sparse bsr matrix
        Update to the prolongator
    B : {array}
        n x k array of the coarse grid near nullspace vectors
    BtBinv : {array}
        Local inv(B_i.H*B_i) matrices for each supernode, i
        B_i is B restricted to the sparsity pattern of supernode i in U
    cost : {list containing one scalar}
        cost[0] is incremented to reflect a FLOP estimate for this function

    Returns
    -------
    Updated U, so that U*B = 0.
    Update is computed by orthogonally (in 2-norm) projecting
    out the components of span(B) in U in a row-wise fashion.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    """

    if sparse.isspmatrix_bsr(U):
        RowsPerBlock = U.blocksize[0]
        ColsPerBlock = U.blocksize[1]
    else:
        RowsPerBlock = 1
        ColsPerBlock = 1

    num_block_rows = int(U.shape[0] / RowsPerBlock)

    UB = np.ravel(U * B)

    # Apply constraints, noting that we need the conjugate of B
    # for use as Bi.H in local projection
    pyamg.amg_core.satisfy_constraints_helper(RowsPerBlock, ColsPerBlock,
                                              num_block_rows, B.shape[1],
                                              np.conjugate(np.ravel(B)), UB,
                                              np.ravel(BtBinv), U.indptr,
                                              U.indices, np.ravel(U.data))

    cost[0] += U.nnz * (2.0*B.shape[1] + B.shape[1]**2) + \
                        (B.shape[1]**3) * B.shape[0]
    return U
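
A dense NumPy sketch of the projection Satisfy_Constraints performs, without the per-supernode sparsity masking done by the amg_core helper (the random U and B are illustrative):

import numpy as np

rng = np.random.default_rng(0)
U = rng.standard_normal((5, 4))
B = rng.standard_normal((4, 2))
# Subtract the span(B) component of every row of U so that U @ B = 0.
BtBinv = np.linalg.inv(B.conj().T @ B)
U = U - (U @ B) @ BtBinv @ B.conj().T
print(np.allclose(U @ B, 0))  # True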
Example 8
    def test_loading_and_storing_empty_containers(self):
        filename = make_temp_dir('empty_containers.hdf5')
        traj = Trajectory(filename=filename)

        # traj.f_add_parameter('empty.dict', {})
        # traj.f_add_parameter('empty.list', [])
        traj.f_add_parameter(ArrayParameter, 'empty.tuple', ())
        traj.f_add_parameter(ArrayParameter, 'empty.array', np.array([], dtype=float))

        spsparse_csc = spsp.csc_matrix((2,10))
        spsparse_csr = spsp.csr_matrix((6660,660))
        spsparse_bsr = spsp.bsr_matrix((3330,2220))
        spsparse_dia = spsp.dia_matrix((1230,1230))

        traj.f_add_parameter(SparseParameter, 'empty.csc', spsparse_csc)
        traj.f_add_parameter(SparseParameter, 'empty.csr', spsparse_csr)
        traj.f_add_parameter(SparseParameter, 'empty.bsr', spsparse_bsr)
        traj.f_add_parameter(SparseParameter, 'empty.dia', spsparse_dia)

        traj.f_add_result(SparseResult, 'empty.all', dict={}, list=[],
                          series=pd.Series(),
                          frame=pd.DataFrame(),
                          panel=pd.Panel(),
                          **traj.par.f_to_dict(short_names=True, fast_access=True))

        traj.f_store()

        newtraj = load_trajectory(index=-1, filename=filename)

        newtraj.f_load(load_data=2)

        epg = newtraj.par.empty
        self.assertTrue(type(epg.tuple) is tuple)
        self.assertTrue(len(epg.tuple) == 0)

        self.assertTrue(type(epg.array) is np.ndarray)
        self.assertTrue(epg.array.size == 0)

        self.assertTrue(spsp.isspmatrix_csr(epg.csr))
        self.assertTrue(epg.csr.size == 0)

        self.assertTrue(spsp.isspmatrix_csc(epg.csc))
        self.assertTrue(epg.csc.size == 0)

        self.assertTrue(spsp.isspmatrix_bsr(epg.bsr))
        self.assertTrue(epg.bsr.size == 0)

        self.assertTrue(spsp.isspmatrix_dia(epg.dia))
        self.assertTrue(epg.dia.size == 0)

        self.compare_trajectories(traj, newtraj)
Example 9
def ParCR(M,f,dim,COMM):        # takes already split Schur data

    rank = COMM.Get_rank()      # processor rank
    numP = COMM.Get_size()      # num. processors

    assert sparse.isspmatrix_bsr(M)     # M must be BSR matrix
    dim = M.blocksize[0]                # blocks  have size [dim x dim]
    n = M.shape[0]//dim                 # M is [n x n] blocks
    h = n                               # subsystem size on each processor
    k = int(np.log2(h))-1               # number of reduction levels
    printr(M.shape,COMM)
    if head(COMM) or tail(COMM):
        assert power2check(n)
    else:
        assert power2check(n-1)
    print_master('Subsystem sizes check out; proceed with ParCR call', COMM)      
    
    M.sort_indices()

    '''SPLIT SYSTEM ACROSS PROCESSORS'''
    Mi = M
    if head(COMM):
        yi = f
    elif tail(COMM):
        yi = np.zeros(h*dim)
        yi[dim:] = f[dim:]
    else:
        yi = np.zeros((h)*dim)
        yi[dim:] = f[dim:]

    '''REDUCE ALL SUBSYSTEMS'''
    xi = reduction_step(Mi,yi,k,COMM)   
    
    if head(COMM):
        sendX = xi.ravel()
        recvX = np.empty((1, sendX.shape[0]*numP), np.float64)
    elif tail(COMM):
        temp = np.zeros((xi.shape[0],1))
        temp[:-dim] = xi[dim:]
        sendX = temp.reshape((1,xi.shape[0]))
        recvX = None
    else: 
        sendX = xi[dim:].ravel()
        recvX = None

    return xi.ravel()
          
Example 10
def convert_to_petsc(a, comm=PETSc.COMM_WORLD):
    """ Convert a scipy sparse matrix to the relevant PETSc type, currently
    only supports csr, bsr, vectors and dense matrices formats. """
    if sp.isspmatrix_csr(a):
        a.sort_indices()
        csr = (a.indptr, a.indices, a.data)
        b = PETSc.Mat().createAIJ(size=a.shape, csr=csr, comm=comm)
    elif sp.isspmatrix_bsr(a):
        a.sort_indices()
        csr = (a.indptr, a.indices, a.data)
        b = PETSc.Mat().createBAIJ(size=a.shape, bsize=a.blocksize,
                                   csr=csr, comm=comm)
    elif a.ndim == 1:
        b = PETSc.Vec().createWithArray(a, comm=comm)
    else:
        b = PETSc.Mat().createDense(size=a.shape, array=a, comm=comm)
    return b
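
A hypothetical round trip mirroring the CSR branch above; it requires petsc4py and uses an illustrative random matrix:

import scipy.sparse as sp
from petsc4py import PETSc

a = sp.random(50, 50, density=0.1, format='csr')
a.sort_indices()
b = PETSc.Mat().createAIJ(size=a.shape,
                          csr=(a.indptr, a.indices, a.data))
print(b.getSize())  # (50, 50)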
Example 11
def get_geometric_weights(A, theta, Nx, Ny):

    n = A.shape[0]
    if isspmatrix_csr(A):
        W = A.copy()
        # Get angle between nodes for (i,j)th entry in A
        for i in range(0, n):
            lower_ind = W.indptr[i]
            upper_ind = W.indptr[i + 1]
            for j_ind in range(lower_ind, upper_ind):
                j = W.indices[j_ind]
                W.data[j_ind] = get_weight(i, j, Nx, Ny, theta)

    elif isspmatrix_bsr(A):
        raise ValueError("Not implemented for BSR matrices yet.")
    else:
        raise TypeError("A must be a CSR matrix.")

    return W
Example 12
def injection_interpolation(A, splitting, cost=[0]):
    """ Create interpolation operator by injection, that is C-points are
    interpolated by value and F-points are not interpolated.

    Parameters
    ----------
    A : {csr_matrix}
        NxN matrix in CSR format or BSR format
    splitting : array
        C/F splitting stored in an array of length N

    Returns
    -------
    NxNc interpolation operator, P
    """
    if isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
        n = A.shape[0] // blocksize
    elif isspmatrix_csr(A):
        n = A.shape[0]
        blocksize = 1
    else:
        try:
            A = A.tocsr()
            warn("Implicit conversion of A to csr", SparseEfficiencyWarning)
            n = A.shape[0]
            blocksize = 1
        except Exception as e:
            raise TypeError("Invalid matrix type, must be CSR or BSR.") from e

    P_rowptr = np.append(np.array([0], dtype='int32'),
                         np.cumsum(splitting, dtype='int32'))
    nc = P_rowptr[-1]
    P_colinds = np.arange(start=0, stop=nc, step=1, dtype='int32')

    if blocksize == 1:
        return csr_matrix((np.ones(
            (nc, ), dtype=A.dtype), P_colinds, P_rowptr),
                          shape=[n, nc])
    else:
        P_data = np.array(nc * [np.identity(blocksize, dtype=A.dtype)],
                          dtype=A.dtype)
        return bsr_matrix((P_data, P_colinds, P_rowptr),
                          blocksize=[blocksize, blocksize],
                          shape=[n * blocksize, nc * blocksize])
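
A tiny worked example of the CSR branch: with an illustrative splitting [1,0,1,0,1], injection maps each C-point to a coarse unknown and leaves F-point rows empty:

import numpy as np
from scipy.sparse import csr_matrix

splitting = np.array([1, 0, 1, 0, 1], dtype='int32')
rowptr = np.append(np.array([0], dtype='int32'),
                   np.cumsum(splitting, dtype='int32'))
nc = rowptr[-1]
colinds = np.arange(nc, dtype='int32')
P = csr_matrix((np.ones(nc), colinds, rowptr), shape=(5, nc))
print(P.toarray())  # identity rows at C-points, zero rows at F-points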
Example 13
def get_geometric_weights_d2(A, theta, Nx, Ny):

    n = A.shape[0]
    # Get maximum possible number of paths of length 2 in A
    paths_d1 = np.max(A.indptr[1:(n + 1)] - A.indptr[0:n])
    max_paths = n * paths_d1 * paths_d1
    # Preallocate empty sparse array
    rowptr = np.zeros((n + 1, ), dtype='int32')
    data = np.zeros((max_paths, ))
    colinds = np.zeros((max_paths, ), dtype='int32')
    next_ind = 0

    if isspmatrix_csr(A):
        # Loop over each node in A
        for i in range(0, n):
            lower_ind1 = A.indptr[i]
            upper_ind1 = A.indptr[i + 1]
            neighbors = []

            # Loop over d1 and d2 neighbors of the ith node, collecting them.
            for d1_ind in range(lower_ind1, upper_ind1):
                d1_node = A.indices[d1_ind]
                lower_ind2 = A.indptr[d1_node]
                upper_ind2 = A.indptr[d1_node + 1]
                for d2_ind in range(lower_ind2, upper_ind2):
                    d2_node = A.indices[d2_ind]
                    neighbors.append(d2_node)

            # Find unique set of neighbors, get weight w(i,j)
            neighbors = np.unique(neighbors)
            for node in neighbors:
                colinds[next_ind] = node
                data[next_ind] = get_weight(i, node, Nx, Ny, theta)
                next_ind += 1

            # Set row-pointer for ith row
            rowptr[i + 1] = next_ind

    elif isspmatrix_bsr(A):
        raise ValueError("Not implemented for BSR matrices yet.")

    W = csr_matrix((data[0:next_ind], colinds[0:next_ind], rowptr),
                   shape=A.shape)
    return W
Example 14
def make_csr(A):
    """
    Convert A to CSR, if A is not a CSR or BSR matrix already.

    Parameters
    ----------
    A : {array, matrix, sparse matrix}
        (n x n) matrix to convert to CSR

    Returns
    -------
    A : {csr_matrix, bsr_matrix}
        If A is csr_matrix or bsr_matrix, then do nothing and return A.
        Else, convert A to CSR if possible and return.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.blackbox import make_csr
    >>> A = poisson((40,40),format='csc')
    >>> Acsr = make_csr(A)
    Implicit conversion of A to CSR in pyamg.blackbox.make_csr
    """

    # Convert to CSR or BSR if necessary
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            print("Implicit conversion of A to CSR in pyamg.blackbox.make_csr")
        except Exception as e:
            raise TypeError(
                "Argument A must have type csr_matrix or "
                "bsr_matrix, or be convertible to csr_matrix") from e
    #
    if A.shape[0] != A.shape[1]:
        raise TypeError("Argument A must be square")
    #
    A = A.asfptype()

    return A
Example 15
def make_csr(A):
    """
    Convert A to CSR, if A is not a CSR or BSR matrix already.

    Parameters
    ----------
    A : array, matrix, sparse matrix
        (n x n) matrix to convert to CSR

    Returns
    -------
    A : csr_matrix, bsr_matrix
        If A is csr_matrix or bsr_matrix, then do nothing and return A.
        Else, convert A to CSR if possible and return.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.blackbox import make_csr
    >>> A = poisson((40,40),format='csc')
    >>> Acsr = make_csr(A)
    Implicit conversion of A to CSR in pyamg.blackbox.make_csr

    """
    # Convert to CSR or BSR if necessary
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            print('Implicit conversion of A to CSR in pyamg.blackbox.make_csr')
        except Exception as e:
            raise TypeError(
                'Argument A must have type csr_matrix or '
                'bsr_matrix, or be convertible to csr_matrix') from e

    if A.shape[0] != A.shape[1]:
        raise TypeError('Argument A must be square')

    A = A.asfptype()

    return A
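
A usage sketch mirroring the doctest above; it assumes pyamg is installed, whose blackbox module ships this function:

from pyamg.blackbox import make_csr
from pyamg.gallery import poisson

A = make_csr(poisson((40, 40), format='csc'))  # prints the conversion notice
print(A.format)  # 'csr'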
Example 16
def energy_prolongation_smoother(A, T, Atilde, B, Bf, Cpt_params, krylov='cg',
                                 maxiter=4, tol=1e-8, degree=1,
                                 weighting='local'):
    """Minimize the energy of the coarse basis functions (columns of T).  Both
    root-node and non-root-node style prolongation smoothing is available, see
    Cpt_params description below.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N)
    Atilde : {csr_matrix}
        Strength of connection matrix
    B : {array}
        Near-nullspace modes for coarse grid.  Has shape (M,k) where
        k is the number of coarse candidate vectors.
    Bf : {array}
        Near-nullspace modes for fine grid.  Has shape (N,k) where
        k is the number of coarse candidate vectors.
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        standard SA prolongation smoothing is carried out.  If True, then
        root-node style prolongation smoothing is carried out.  The dict must
        be a dictionary of parameters containing, (1) P_I: P_I.T is the
        injection matrix for the Cpts, (2) I_F: an identity matrix for only the
        F-points (i.e. I, but with zero rows and columns for C-points) and I_C:
        the C-point analogue to I_F.  See Notes below for more information. 
    krylov : {string}
        'cg' : for SPD systems.  Solve A T = 0 in a constraint space with CG
        'cgnr' : for nonsymmetric and/or indefinite systems.  
                 Solve A T = 0 in a constraint space with CGNR
        'gmres' : for nonsymmetric and/or indefinite systems.  
                 Solve A T = 0 in a constraint space with GMRES
    maxiter : integer
        Number of energy minimization steps to apply to the prolongator
    tol : {scalar}
        Minimization tolerance
    degree : {int}
        Generate sparsity pattern for P based on (Atilde^degree T)
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal preconditioning
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
          Avoids any potential under-damping due to inaccurate spectral radius
          estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A  
        'diagonal': Use inverse of the diagonal of A

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator

    Notes
    -----
    Only 'diagonal' weighting is supported for the CGNR method, because
    we are working with A^* A and not A.
    
    When Cpt_params[0] == True, root-node style prolongation smoothing
    is used to minimize the energy of columns of T.  Essentially, an 
    identity block is maintained in T, corresponding to injection from 
    the coarse-grid to the fine-grid root-nodes.  See [2] for more details,
    and see util.utils.get_Cpt_params for the helper function to generate
    Cpt_params.

    If Cpt_params[0] == False, the energy of columns of T are still 
    minimized, but without maintaining the identity block.

    Examples
    --------
    >>> from pyamg.aggregation import energy_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy
    >>> data = numpy.ones((6,))
    >>> row = numpy.arange(0,6)
    >>> col = numpy.kron([0,1],numpy.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> print(T.todense())
    [[ 1.  0.]
     [ 1.  0.]
     [ 1.  0.]
     [ 0.  1.]
     [ 0.  1.]
     [ 0.  1.]]
    >>> A = poisson((6,),format='csr')
    >>> P = energy_prolongation_smoother(A,T,A,numpy.ones((2,1),dtype=float), None, (False,{}) )
    >>> print(P.todense())
    [[ 1.          0.        ]
     [ 1.          0.        ]
     [ 0.66666667  0.33333333]
     [ 0.33333333  0.66666667]
     [ 0.          1.        ]
     [ 0.          1.        ]]

    References
    ----------
    .. [1] Jan Mandel, Marian Brezina, and Petr Vanek
       "Energy Optimization of Algebraic Multigrid Bases"
       Computing 62, 205-228, 1999
       http://dx.doi.org/10.1007/s006070050022
    .. [2] Olson, L. and Schroder, J. and Tuminaro, R.,
       "A general interpolation strategy for algebraic 
       multigrid using energy minimization", SIAM Journal
       on Scientific Computing (SISC), vol. 33, pp. 
       966--991, 2011.
    """
    
    # Test Inputs
    if maxiter < 0:
        raise ValueError('maxiter must be >= 0')
    if tol > 1:
        raise ValueError('tol must be <= 1')
   
    if isspmatrix_csr(A):
        A = A.tobsr(blocksize=(1,1), copy=False)
    elif isspmatrix_bsr(A):
        pass
    else:
        raise TypeError("A must be csr_matrix or bsr_matrix")

    if isspmatrix_csr(T):
        T = T.tobsr(blocksize=(1,1), copy=False)
    elif isspmatrix_bsr(T):
        pass
    else:
        raise TypeError("T must be csr_matrix or bsr_matrix")

    if Atilde is None:
        AtildeCopy = csr_matrix((numpy.ones(len(A.indices)), A.indices.copy(),
                                 A.indptr.copy()),
                                shape=(A.shape[0]//A.blocksize[0],
                                       A.shape[1]//A.blocksize[1]))
    else:
        AtildeCopy = Atilde.copy()

    if not isspmatrix_csr(AtildeCopy):
        raise TypeError("Atilde must be csr_matrix")

    if T.blocksize[0] != A.blocksize[0]:
        raise ValueError("T's row-blocksize should be the same as A's blocksize")
    
    if B.shape[0] != T.shape[1]:
        raise ValueError("B holds the coarse-grid candidate vectors: "
                         "num_rows(B) must equal num_cols(T)")

    if min(T.nnz, AtildeCopy.nnz, A.nnz) == 0:
        return T
    
    ##
    # Expand the allowed sparsity pattern for P through multiplication by Atilde
    T.sort_indices()
    Sparsity_Pattern = csr_matrix((numpy.ones(T.indices.shape), T.indices,
                                   T.indptr),
                                  shape=(T.shape[0]//T.blocksize[0],
                                         T.shape[1]//T.blocksize[1]))
    AtildeCopy.data[:] = 1.0
    for i in range(degree):
        Sparsity_Pattern = AtildeCopy*Sparsity_Pattern
    
    ##
    #UnAmal returns a BSR matrix
    Sparsity_Pattern = UnAmal(Sparsity_Pattern, T.blocksize[0], T.blocksize[1])
    Sparsity_Pattern.sort_indices()
    
    ##
    #If using root nodes, enforce identity at C-points
    if Cpt_params[0]:
        Sparsity_Pattern = Cpt_params[1]['I_F']*Sparsity_Pattern
        Sparsity_Pattern = Cpt_params[1]['P_I'] + Sparsity_Pattern

    ##
    # Construct array of inv(Bi'Bi), where Bi is B restricted to row i's sparsity pattern in 
    # Sparsity Pattern. This array is used multiple times in Satisfy_Constraints(...).
    BtBinv = compute_BtBinv(B, Sparsity_Pattern)
    
    ##
    # If using root nodes and B has more columns than A's blocksize, then
    # T must be updated so that T*B = Bfine
    if Cpt_params[0] and (B.shape[1] > A.blocksize[0]):
        T = filter_operator(T, Sparsity_Pattern, B, Bf, BtBinv)
        # Ensure identity at C-pts 
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']
   
    ##
    # Iteratively minimize the energy of T subject to the constraints of Sparsity_Pattern
    # and maintaining T's effect on B, i.e. T*B = (T+Update)*B, i.e. Update*B = 0 
    if krylov == 'cg':
        T = cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter, tol, weighting, Cpt_params)
    elif krylov == 'cgnr':   
        T = cgnr_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter, tol, weighting, Cpt_params)
    elif krylov == 'gmres':
        T = gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter, tol, weighting, Cpt_params)

    T.eliminate_zeros()

    return T
Example 17
def energy_prolongation_smoother(A,
                                 T,
                                 Atilde,
                                 B,
                                 Bf,
                                 Cpt_params,
                                 krylov='cg',
                                 maxiter=4,
                                 tol=1e-8,
                                 degree=1,
                                 weighting='local',
                                 prefilter={},
                                 postfilter={},
                                 cost=[0.0]):
    """Minimize the energy of the coarse basis functions (columns of T).  Both
    root-node and non-root-node style prolongation smoothing is available, see
    Cpt_params description below.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N)
    Atilde : {csr_matrix}
        Strength of connection matrix
    B : {array}
        Near-nullspace modes for coarse grid.  Has shape (M,k) where
        k is the number of coarse candidate vectors.
    Bf : {array}
        Near-nullspace modes for fine grid.  Has shape (N,k) where
        k is the number of coarse candidate vectors.
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        standard SA prolongation smoothing is carried out.  If True, then
        root-node style prolongation smoothing is carried out.  The dict must
        be a dictionary of parameters containing, (1) P_I: P_I.T is the
        injection matrix for the Cpts, (2) I_F: an identity matrix for only the
        F-points (i.e. I, but with zero rows and columns for C-points) and I_C:
        the C-point analogue to I_F.  See Notes below for more information.
    krylov : {string}
        'cg' : for SPD systems.  Solve A T = 0 in a constraint space with CG
        'cgnr' : for nonsymmetric and/or indefinite systems.
                 Solve A T = 0 in a constraint space with CGNR
        'gmres' : for nonsymmetric and/or indefinite systems.
                 Solve A T = 0 in a constraint space with GMRES
    maxiter : integer
        Number of energy minimization steps to apply to the prolongator
    tol : {scalar}
        Minimization tolerance
    degree : {int}
        Generate sparsity pattern for P based on (Atilde^degree T)
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the
            diagonal preconditioning
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
            Avoids any potential under-damping due to inaccurate spectral
            radius estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A
        'diagonal': Use inverse of the diagonal of A
    prefilter : {dictionary} : Default {}
        Filters elements by row in the sparsity pattern for P to reduce
        operator and setup complexity. If None or an empty dictionary, no
        dropping in P is done. If prefilter has key 'k', then the largest
        'k' entries are kept in each row.  If prefilter has key 'theta',
        all entries such that
            P[i,j] < kwargs['theta']*max(abs(P[i,:]))
        are dropped.  If prefilter['k'] and prefilter['theta'] are both
        present, then they are used in conjunction, with the union of
        their patterns used.
    postfilter : {dictionary} : Default {}
        Filters elements by row in the smoothed P to reduce operator
        complexity.  Only supported if using the rootnode_solver. If None
        or an empty dictionary, no dropping in P is done. If postfilter
        has key 'k', then the largest 'k' entries are kept in each row.
        If postfilter has key 'theta', all entries such that
            P[i,j] < kwargs['theta']*max(abs(P[i,:]))
        are dropped.  If postfilter['k'] and postfilter['theta'] are both
        present, then they are used in conjunction, with the union of
        their patterns used.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator

    Notes
    -----
    Only 'diagonal' weighting is supported for the CGNR method, because
    we are working with A^* A and not A.

    When Cpt_params[0] == True, root-node style prolongation smoothing
    is used to minimize the energy of columns of T.  Essentially, an
    identity block is maintained in T, corresponding to injection from
    the coarse-grid to the fine-grid root-nodes.  See [2] for more details,
    and see util.utils.get_Cpt_params for the helper function to generate
    Cpt_params.

    If Cpt_params[0] == False, the energy of columns of T are still
    minimized, but without maintaining the identity block.

    Examples
    --------
    >>> from pyamg.aggregation import energy_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> print(T.todense())
    [[ 1.  0.]
     [ 1.  0.]
     [ 1.  0.]
     [ 0.  1.]
     [ 0.  1.]
     [ 0.  1.]]
    >>> A = poisson((6,),format='csr')
    >>> B = np.ones((2,1),dtype=float)
    >>> P = energy_prolongation_smoother(A,T,A,B, None, (False,{}))
    >>> print(P.todense())
    [[ 1.          0.        ]
     [ 1.          0.        ]
     [ 0.66666667  0.33333333]
     [ 0.33333333  0.66666667]
     [ 0.          1.        ]
     [ 0.          1.        ]]

    References
    ----------
    .. [1] Jan Mandel, Marian Brezina, and Petr Vanek
       "Energy Optimization of Algebraic Multigrid Bases"
       Computing 62, 205-228, 1999
       http://dx.doi.org/10.1007/s006070050022
    .. [2] Olson, L. and Schroder, J. and Tuminaro, R.,
       "A general interpolation strategy for algebraic
       multigrid using energy minimization", SIAM Journal
       on Scientific Computing (SISC), vol. 33, pp.
       966--991, 2011.
    """

    # Test Inputs
    if maxiter < 0:
        raise ValueError('maxiter must be >= 0')
    if tol > 1:
        raise ValueError('tol must be <= 1')

    if sparse.isspmatrix_csr(A):
        A = A.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(A):
        pass
    else:
        raise TypeError("A must be csr_matrix or bsr_matrix")

    if sparse.isspmatrix_csr(T):
        T = T.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(T):
        pass
    else:
        raise TypeError("T must be csr_matrix or bsr_matrix")

    if T.blocksize[0] != A.blocksize[0]:
        raise ValueError("T row-blocksize should be the same as A blocksize")

    if B.shape[0] != T.shape[1]:
        raise ValueError("B holds the coarse-grid candidate vectors: "
                         "num_rows(B) must equal num_cols(T)")

    if min(T.nnz, A.nnz) == 0:
        return T

    # Preprocess Atilde, the strength matrix
    if Atilde is None:
        Atilde = sparse.csr_matrix(
            (np.ones(len(A.indices)), A.indices.copy(), A.indptr.copy()),
            shape=(A.shape[0] // A.blocksize[0],
                   A.shape[1] // A.blocksize[1]))

    if not sparse.isspmatrix_csr(Atilde):
        raise TypeError("Atilde must be csr_matrix")

    if ('theta' in prefilter) and (prefilter['theta'] == 0):
        prefilter.pop('theta', None)

    if ('theta' in postfilter) and (postfilter['theta'] == 0):
        postfilter.pop('theta', None)

    # Expand allowed sparsity pattern for P through multiplication by Atilde
    if degree > 0:

        # Construct Sparsity_Pattern by multiplying with Atilde
        T.sort_indices()
        shape = (int(T.shape[0] / T.blocksize[0]),
                 int(T.shape[1] / T.blocksize[1]))
        Sparsity_Pattern = sparse.csr_matrix(
            (np.ones(T.indices.shape), T.indices, T.indptr), shape=shape)

        AtildeCopy = Atilde.copy()
        for i in range(degree):
            cost[0] += mat_mat_complexity(AtildeCopy,
                                          Sparsity_Pattern) / float(A.nnz)
            Sparsity_Pattern = AtildeCopy * Sparsity_Pattern

        # Optional filtering of sparsity pattern before smoothing
        #   - Complexity: two passes through T for theta-filter, a sort on
        #     each row for k-filter, adding matrices if both.
        if 'theta' in prefilter and 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
                                                prefilter['theta'],
                                                cost=temp_cost)
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
            # Union two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
            cost[0] += Sparsity_Pattern.nnz / float(A.nnz)
        elif 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
                                                  prefilter['theta'],
                                                  cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        # UnAmal returns a BSR matrix with 1's in the nonzero locations
        Sparsity_Pattern = UnAmal(Sparsity_Pattern, T.blocksize[0],
                                  T.blocksize[1])
        Sparsity_Pattern.sort_indices()

    else:
        # If degree is 0, just copy T for the sparsity pattern
        Sparsity_Pattern = T.copy()
        if 'theta' in prefilter and 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
                                                prefilter['theta'],
                                                cost=temp_cost)
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
            # Union two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
            cost[0] += Sparsity_Pattern.nnz / float(A.nnz)
        elif 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
                                                  prefilter['theta'],
                                                  cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        Sparsity_Pattern.data[:] = 1.0
        Sparsity_Pattern.sort_indices()

    # If using root nodes, enforce identity at C-points
    if Cpt_params[0]:
        Sparsity_Pattern = Cpt_params[1]['I_F'] * Sparsity_Pattern
        Sparsity_Pattern = Cpt_params[1]['P_I'] + Sparsity_Pattern
        cost[0] += Sparsity_Pattern.nnz / float(A.nnz)

    # Construct array of inv(Bi'Bi), where Bi is B restricted to row i's
    # sparsity pattern in Sparsity Pattern. This array is used multiple times
    # in Satisfy_Constraints(...).
    temp_cost = [0.0]
    BtBinv = compute_BtBinv(B, Sparsity_Pattern, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    # If using root nodes and B has more columns than A's blocksize, then
    # T must be updated so that T*B = Bfine.  Note, if this is a 'secondpass'
    # after dropping entries in P, then we must re-enforce the constraints
    if (Cpt_params[0] and
        (B.shape[1] > A.blocksize[0])) or ('secondpass' in postfilter):
        temp_cost = [0.0]
        T = filter_operator(T, Sparsity_Pattern, B, Bf, BtBinv, cost=temp_cost)
        cost[0] += temp_cost[0] / float(A.nnz)
        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

    # Iteratively minimize the energy of T subject to the constraints of
    # Sparsity_Pattern and maintaining T's effect on B, i.e. T*B =
    # (T+Update)*B, i.e. Update*B = 0
    if krylov == 'cg':
        T = cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                      maxiter, tol, weighting, Cpt_params,
                                      cost)
    elif krylov == 'cgnr':
        T = cgnr_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                        maxiter, tol, weighting, Cpt_params,
                                        cost)
    elif krylov == 'gmres':
        T = gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                         maxiter, tol, weighting, Cpt_params,
                                         cost)

    T.eliminate_zeros()

    # Filter entries in P, only in the rootnode case, i.e., Cpt_params[0] == True
    if (len(postfilter) == 0) or ('secondpass'
                                  in postfilter) or (Cpt_params[0] is False):
        return T
    else:
        if 'theta' in postfilter and 'k' in postfilter:
            temp_cost = [0.0]
            T_theta = filter_matrix_rows(T,
                                         postfilter['theta'],
                                         cost=temp_cost)
            T_k = truncate_rows(T, postfilter['k'], cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)

            # Union two sparsity patterns
            T_theta.data[:] = 1.0
            T_k.data[:] = 1.0
            T_filter = T_theta + T_k
            T_filter.data[:] = 1.0
            T_filter = T.multiply(T_filter)

        elif 'k' in postfilter:
            temp_cost = [0.0]
            T_filter = truncate_rows(T, postfilter['k'], cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in postfilter:
            temp_cost = [0.0]
            T_filter = filter_matrix_rows(T,
                                          postfilter['theta'],
                                          cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        else:
            raise ValueError("Unrecognized postfilter option")

        # Re-smooth T_filter and re-fit the modes B into the span.
        # Note, we set 'secondpass', because this is the second
        # filtering pass
        T = energy_prolongation_smoother(A,
                                         T_filter,
                                         Atilde,
                                         B,
                                         Bf,
                                         Cpt_params,
                                         krylov=krylov,
                                         maxiter=1,
                                         tol=1e-8,
                                         degree=0,
                                         weighting=weighting,
                                         prefilter={},
                                         postfilter={'secondpass': True},
                                         cost=cost)

    return T
Example 18
def ruge_stuben_solver(A,
                       strength=('classical', {'theta': 0.25}),
                       CF='RS',
                       interpolation='direct',
                       restriction='galerkin',
                       presmoother=('gauss_seidel', {'sweep': 'symmetric'}),
                       postsmoother=('gauss_seidel', {'sweep': 'symmetric'}),
                       max_levels=10, max_coarse=10, keep=False, **kwargs):
    """Create a multilevel solver using Classical AMG (Ruge-Stuben AMG)

    Parameters
    ----------
    A : csr_matrix
        Square matrix in CSR format
    strength : ['symmetric', 'classical', 'evolution', 'distance',
                'algebraic_distance','affinity', 'energy_based', None]
        Method used to determine the strength of connection between unknowns
        of the linear system.  Method-specific parameters may be passed in
        using a tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If
        strength=None, all nonzero entries of the matrix are considered strong.
    CF : {string} : default 'RS'
        Method used for coarse grid selection (C/F splitting)
        Supported methods are RS, PMIS, PMISc, CLJP, CLJPc, and CR.
    interpolation : {string} : default 'direct'
        Method for interpolation. Options include 'direct', 'standard', 'injection',
        'one_point', and 'distance_two'.
    restriction : {string or dict} : default 'galerkin'
        'galerkin' sets R = P^T for a Galerkin coarse-grid operator. Alternatively,
        an interpolation method as above may be specified, to build the restriction
        operator based on A^T.
    presmoother : {string or dict}
        Method used for presmoothing at each level.  Method-specific parameters
        may be passed in using a tuple, e.g.
        presmoother=('gauss_seidel',{'sweep':'symmetric}), the default.
    postsmoother : {string or dict}
        Postsmoothing method with the same usage as presmoother
    max_levels: {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse: {integer} : default 10
        Maximum number of variables permitted on the coarse grid.
    keep: {bool} : default False
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics.  For example, if True, then strength of connection (C) and
        tentative prolongation (T) are kept.

    Returns
    -------
    ml : multilevel_solver
        Multigrid hierarchy of matrices and prolongation operators

    Other Parameters
    ----------------
    cycle_type : ['V','W','F']
        Structure of multigrid cycle
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary
        of arguments to be passed to fn.
    setup_complexity : bool
        For a detailed, more accurate setup complexity, pass in
        setup_complexity=True. This will slow down performance but
        increase the accuracy of the complexity count.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import ruge_stuben_solver
    >>> A = poisson((10,),format='csr')
    >>> ml = ruge_stuben_solver(A,max_coarse=3)

    Notes
    -----

    Standard interpolation is generally considered more robust than
    direct, but direct is currently the default until the new
    implementation of standard has been more rigorously tested.

    "coarse_solver" is an optional argument and is the solver used at the
    coarsest grid.  The default is a pseudo-inverse.  Most simply,
    coarse_solver can be one of ['splu', 'lu', 'cholesky, 'pinv',
    'gauss_seidel', ... ].  Additionally, coarse_solver may be a tuple
    (fn, args), where fn is a string such as ['splu', 'lu', ...] or a callable
    function, and args is a dictionary of arguments to be passed to fn.


    References
    ----------
    .. [1] Trottenberg, U., Oosterlee, C. W., and Schuller, A.,
       "Multigrid" San Diego: Academic Press, 2001.  Appendix A

    See Also
    --------
    aggregation.smoothed_aggregation_solver, multilevel_solver,
    aggregation.rootnode_solver

    """

    if 'setup_complexity' in kwargs:
        if kwargs['setup_complexity'] is True:
            mat_mat_complexity.__detailed__ = True
        del kwargs['setup_complexity']

    # Convert A to csr
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
        except Exception as e:
            raise TypeError('Argument A must have type csr_matrix, bsr_matrix, '
                            'or be convertible to csr_matrix') from e
    
    # if isspmatrix_bsr(A):
    #     warn("Classical AMG is often more effective on CSR matrices.")

    # preprocess A
    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    levels = [multilevel_solver.level()]
    levels[-1].A = A

    while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
        extend_hierarchy(levels, strength, CF, interpolation, restriction, keep)

    ml = multilevel_solver(levels, **kwargs)
    change_smoothers(ml, presmoother, postsmoother)
    return ml
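
A usage sketch (assumes pyamg is installed): build the classical AMG hierarchy and solve A x = b for an illustrative 1D Poisson problem:

import numpy as np
from pyamg import ruge_stuben_solver
from pyamg.gallery import poisson

A = poisson((100,), format='csr')
ml = ruge_stuben_solver(A, max_coarse=10)
b = np.ones(A.shape[0])
x = ml.solve(b, tol=1e-10)
print(np.linalg.norm(b - A @ x))  # small residual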
Example 19
def distance_two_interpolation(A,
                               C,
                               splitting,
                               theta=None,
                               norm='min',
                               plus_i=True,
                               cost=[0]):
    """Create prolongator using distance-two AMG interpolation (extended+i interpolaton).

    Parameters
    ----------
    A : {csr_matrix}
        NxN matrix in CSR format
    C : {csr_matrix}
        Strength-of-Connection matrix
        Must have zero diagonal
    splitting : array
        C/F splitting stored in an array of length N
    theta : float in [0,1), default None
        theta value defining strong connections in a classical AMG sense. Provide if a
        different SOC is used for P than for the CF-splitting; otherwise, leave theta = None.
    norm : string, default 'min'
        Norm used in redefining classical SOC. Options are 'min' and 'abs' for CSR matrices,
        and 'min', 'abs', and 'fro' for BSR matrices. See strength.py for more information.
    plus_i : bool, default True
        Use "Extended+i" interpolation from [0] as opposed to "Extended" interpolation. Typically
        gives better interpolation with minimal added expense.

    Returns
    -------
    P : {csr_matrix}
        Prolongator using distance-two (extended+i) interpolation

    References
    ----------
    [0] "Distance-Two Interpolation for Parallel Algebraic Multigrid,"
       H. De Sterck, R. Falgout, J. Nolting, U. M. Yang, (2007).

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical import distance_two_interpolation
    >>> import numpy as np
    >>> A = poisson((5,),format='csr')
    >>> splitting = np.array([1,0,1,0,1], dtype='intc')
    >>> P = distance_two_interpolation(A, A, splitting)
    >>> print(P.todense())
    [[ 1.   0.   0. ]
     [ 0.5  0.5  0. ]
     [ 0.   1.   0. ]
     [ 0.   0.5  0.5]
     [ 0.   0.   1. ]]

    """
    if not isspmatrix_csr(C):
        raise TypeError('Expected csr_matrix SOC matrix, C.')

    nc = np.sum(splitting)
    n = A.shape[0]

    # BSR format: convert A to CSR, and expand the splitting and SOC matrix
    # to have DOFs corresponding to the CSR A
    if isspmatrix_bsr(A):
        temp_A = A.tocsr()
        splitting0 = splitting * np.ones((A.blocksize[0], 1), dtype='intc')
        splitting0 = np.reshape(splitting0, (np.prod(splitting0.shape), ),
                                order='F')
        if theta is not None:
            C0 = classical_strength_of_connection(A,
                                                  theta=theta,
                                                  norm=norm,
                                                  cost=cost)
            C0 = UnAmal(C0, A.blocksize[0], A.blocksize[1])
        else:
            C0 = UnAmal(C, A.blocksize[0], A.blocksize[1])
        C0 = C0.tocsr()
        C0.eliminate_zeros()

        # Interpolation weights are computed based on entries in A, but subject to
        # the sparsity pattern of C.  So, copy the entries of A into the
        # sparsity pattern of C.
        C0.data[:] = 1.0
        C0 = C0.multiply(temp_A)

        P_indptr = np.empty_like(temp_A.indptr)
        amg_core.distance_two_amg_interpolation_pass1(temp_A.shape[0],
                                                      C0.indptr, C0.indices,
                                                      splitting0, P_indptr)
        nnz = P_indptr[-1]
        P_colinds = np.empty(nnz, dtype=P_indptr.dtype)
        P_data = np.empty(nnz, dtype=temp_A.dtype)
        if plus_i:
            amg_core.extended_plusi_interpolation_pass2(
                temp_A.shape[0], temp_A.indptr, temp_A.indices, temp_A.data,
                C0.indptr, C0.indices, C0.data, splitting0, P_indptr,
                P_colinds, P_data)
        else:
            amg_core.extended_interpolation_pass2(temp_A.shape[0],
                                                  temp_A.indptr,
                                                  temp_A.indices, temp_A.data,
                                                  C0.indptr, C0.indices,
                                                  C0.data, splitting0,
                                                  P_indptr, P_colinds, P_data)
        nc = np.sum(splitting0)
        n = A.shape[0]
        P = csr_matrix((P_data, P_colinds, P_indptr), shape=[n, nc])
        return P.tobsr(blocksize=A.blocksize)

    # CSR format
    else:
        if theta is not None:
            C0 = classical_strength_of_connection(A,
                                                  theta=theta,
                                                  norm=norm,
                                                  cost=cost)
        else:
            C0 = C.copy()
        C0.eliminate_zeros()

        # Interpolation weights are computed based on entries in A, but subject to
        # the sparsity pattern of C.  So, copy the entries of A into the
        # sparsity pattern of C.
        C0.data[:] = 1.0
        C0 = C0.multiply(A)

        P_indptr = np.empty_like(A.indptr)
        amg_core.distance_two_amg_interpolation_pass1(A.shape[0], C0.indptr,
                                                      C0.indices, splitting,
                                                      P_indptr)
        nnz = P_indptr[-1]
        P_colinds = np.empty(nnz, dtype=P_indptr.dtype)
        P_data = np.empty(nnz, dtype=A.dtype)
        if plus_i:
            amg_core.extended_plusi_interpolation_pass2(
                A.shape[0], A.indptr, A.indices, A.data, C0.indptr, C0.indices,
                C0.data, splitting, P_indptr, P_colinds, P_data)
        else:
            amg_core.extended_interpolation_pass2(A.shape[0], A.indptr,
                                                  A.indices, A.data, C0.indptr,
                                                  C0.indices, C0.data,
                                                  splitting, P_indptr,
                                                  P_colinds, P_data)
        nc = np.sum(splitting)
        n = A.shape[0]
        return csr_matrix((P_data, P_colinds, P_indptr), shape=[n, nc])
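# A minimal, hedged usage sketch for the BSR branch above: the docstring's
# Poisson example recast as a BSR matrix with 1x1 blocks, so the splitting/SOC
# expansion and the final tobsr() conversion are exercised.  The C/F splitting
# is illustrative, not produced by a real coarsening routine.
import numpy as np
from pyamg.gallery import poisson

A = poisson((5,), format='csr').tobsr(blocksize=(1, 1))
C = A.tocsr()                                # use A itself as the SOC matrix
splitting = np.array([1, 0, 1, 0, 1], dtype='intc')
P = standard_interpolation(A, C, splitting)  # returns a bsr_matrix here
print(P.todense())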
def solver_diagnostics(A, solver=smoothed_aggregation_solver, 
        fname='solver_diagnostic', definiteness=None,
        symmetry=None, strength_list=None, aggregate_list=None,
        smooth_list=None, Bimprove_list=None, max_levels_list=None,
        cycle_list=None, krylov_list=None, prepostsmoother_list=None,
        B_list=None, coarse_size_list=None):
    ''' 
    Try many different parameter combinations for
    smoothed_aggregation_solver(...).  The goal is to find appropriate SA
    parameter settings for the arbitrary matrix problem A x = 0 using a 
    random initial guess.
    
    Every combination of the input parameter lists is used to construct and
    test an SA solver.  Thus, be wary of the total number of solvers possible!
    For example, for an SPD CSR matrix, the default parameter lists generate 60
    different smoothed aggregation solvers.

    Symmetry and definiteness are automatically detected, but it is safest to
    set these properties manually through the ``definiteness`` and ``symmetry``
    parameters.
    
    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix in CSR or BSR format
    
    solver : {smoothed_aggregation_solver, rootnode_solver}
        Solver to run diagnostic on.  Currently, these two solvers are supported.

    fname : {string}
        Base file name for the diagnostic output.  Results are written to
        fname + '.txt' and the generated script to fname + '.py'

        Default: 'solver_diagnostic'
    
    definiteness : {string}
        'positive' denotes positive definiteness
        'indefinite' denotes indefiniteness

        Default: detected with a few iterations of the Arnoldi method
    
    symmetry : {string}
        'hermitian' or 'nonsymmetric', denoting the symmetry of the matrix

        Default: detected by testing if A induces an inner-product
    
    strength_list : {list} 
        List of various parameter choices for the strength argument sent to solver(...)
        
        Default:  [('symmetric', {'theta' : 0.0}), 
                   ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':2.0}),
                   ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':4.0})]
    
    aggregate_list : {list} 
        List of various parameter choices for the aggregate argument sent to solver(...)

        Default: ['standard']
    
    smooth_list : {list} 
        List of various parameter choices for the smooth argument sent to solver(...)

        Default depends on the symmetry and definiteness parameters:
        if definiteness == 'positive' and (symmetry=='hermitian' or symmetry=='symmetric'):
            ['jacobi', ('jacobi', {'filter' : True, 'weighting' : 'local'}),
            ('energy',{'krylov':'cg','maxiter':2, 'degree':1, 'weighting':'local'}),
            ('energy',{'krylov':'cg','maxiter':3, 'degree':2, 'weighting':'local'}),
            ('energy',{'krylov':'cg','maxiter':4, 'degree':3, 'weighting':'local'})]
        if definiteness == 'indefinite' or symmetry=='nonsymmetric':
           [('energy',{'krylov':'gmres','maxiter':2,'degree':1,'weighting':'local'}),
            ('energy',{'krylov':'gmres','maxiter':3,'degree':2,'weighting':'local'}),
            ('energy',{'krylov':'gmres','maxiter':4,'degree':3,'weighting':'local'})]

    Bimprove_list : {list} 
        List of various parameter choices for the Bimprove argument sent to solver(...)

        Default: ['default', None]

    max_levels_list : {list} 
        List of various parameter choices for the max_levels argument sent to solver(...)
        
        Default: [25]
    
    cycle_list : {list} 
        List of various parameter choices for the cycle argument sent to solver.solve() 
        
        Default: ['V', 'W']
    
    krylov_list : {list} 
        List of various parameter choices for the krylov argument sent to
        solver.solve().  Basic form is (string, dict), where the string is a
        Krylov descriptor, e.g., 'cg' or 'gmres', and dict is a dictionary of
        parameters like tol and maxiter.  The dictionary dict may be empty.
      
        Default depends on the symmetry and definiteness parameters:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':     
            [('gmres', {'tol':1e-8, 'maxiter':300})]
        else:
            [('cg', {'tol':1e-8, 'maxiter':300})]

    prepostsmoother_list : {list} 
        List of various parameter choices for the presmoother and postsmoother
        arguments sent to solver(...).  Basic form is 
        [ (presmoother_descriptor, postsmoother_descriptor), ...].
        
        Default depends on the symmetry parameter:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            [ (('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2}),
               ('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2})) ]
        else:
            [ (('block_gauss_seidel',{'sweep':'symmetric','iterations':1}),
               ('block_gauss_seidel',{'sweep':'symmetric','iterations':1})) ]
        
    B_list : {list} 
        List of various B parameter choices for the B and BH arguments sent to
        solver(...).  Basic form is [ (B, BH, string), ...].  B is a vector of
        left near null-space modes used to generate prolongation, BH is a
        vector of right near null-space modes used to generate restriction, and
        string is a Python command (or commands) that can generate your
        particular B and BH choice.  B and BH must have a row-size equal to
        the dimensionality of A.  string is only used in the automatically
        generated test script.

        Default depends on whether A is BSR:
        if A is CSR:
            B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones')]
        if A is BSR:
            bsize = A.blocksize[0]
            B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones'),
                      (kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)), 
                       kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)),
                       'B = kron(ones((A.shape[0]/A.blocksize[0],1), dtype=A.dtype), 
                                 eye(A.blocksize[0])); BH = B.copy()')]

    coarse_size_list : {list} 
        List of various tuples containing pairs of the (max_coarse, coarse_solver)
        parameters sent to solver(...).  

        Default: [ (300, 'pinv') ]

    Notes
    -----
    Only smoothed_aggregation_solver(...) and rootnode_solver(...) are
    supported.  The Ruge-Stuben solver framework is not used.
    
    60 total solvers are generated by the defaults for CSR SPD matrices.  For
    BSR SPD matrices, 120 total solvers are generated by the defaults.  A
    somewhat smaller number of total solvers is generated if the matrix is
    indefinite or nonsymmetric.  Every combination of the parameter lists is
    attempted.

    Generally, there are two types of parameter lists passed to this function.  
    Type 1 includes: cycle_list, strength_list, aggregate_list, smooth_list, 
                     krylov_list, Bimprove_list, max_levels_list
                     -------------------------------------------
                     Here, you pass in a list of different parameters, e.g., 
                     cycle_list=['V','W'].

    Type 2 includes: B_list, coarse_size_list, prepostsmoother_list 
                     -------------------------------------------
                     This is similar to Type 1, only these represent lists of
                     pairs of parameters, e.g., 
                     coarse_size_list=[ (300, 'pinv'), (5000, 'splu')], 
                     where coarse_size_list is of the form
                     [ (max_coarse, coarse_solver), ...].

    For detailed info on each of these parameter lists, see above.

    Returns
    -------
    Two files are written:
    (1) fname + ".py"
        Use the function defined here to generate and run the best 
        smoothed aggregation method found.  The only argument taken
        is a BSR/CSR matrix.
    (2) fname + ".txt"
        This file outputs the solver profile for each method 
        tried in a sorted table listing the best solver first.
        The detailed solver descriptions then follow the table.
    
    See Also
    --------
    smoothed_aggregation_solver

    Examples
    --------
    >>> from pyamg import gallery
    >>> from solver_diagnostics import *
    >>> A = gallery.poisson( (50,50), format='csr') 
    >>> solver_diagnostics(A, fname='isotropic_diffusion_diagnostics', cycle_list=['V'])
    
    '''
    
    ##
    # Preprocess A
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            print('Implicit conversion of A to CSR')
        except Exception:
            raise TypeError('Argument A must have type csr_matrix or bsr_matrix,\
                             or be convertible to csr_matrix')
    #
    A = A.asfptype()
    #
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')
    
    print "\nSearching for optimal smoothed aggregation method for (%d,%d) matrix"%A.shape
    print "    ..."
    
    ##
    # Detect symmetry
    if symmetry is None:
        if ishermitian(A, fast_check=True):
            symmetry = 'hermitian'
        else:
            symmetry = 'nonsymmetric'
        ##
        print("    Detected a " + symmetry + " matrix")
    else:
        print("    User specified a " + symmetry + " matrix")


    ##
    # Detect definiteness
    if definiteness is None:
        [EVect, Lambda, H, V, breakdown_flag] = _approximate_eigenvalues(A, 1e-6, 40)
        if Lambda.min() < 0.0:
            definiteness = 'indefinite'
            print("    Detected indefiniteness")
        else:
            definiteness = 'positive'
            print("    Detected positive definiteness")
    else:
        print("    User specified definiteness as " + definiteness)

    ##
    # Default B are (1) a vector of all ones, and 
    # (2) if A is BSR, the constant for each variable
    if B_list is None:
        B_list = [(ones((A.shape[0],1), dtype=A.dtype), 
                   ones((A.shape[0],1), dtype=A.dtype), 
                   'B = ones((A.shape[0],1), dtype=A.dtype); BH = B.copy()')]

        if isspmatrix_bsr(A) and A.blocksize[0] > 1:
            bsize = A.blocksize[0]
            B_list.append( (kron(ones((A.shape[0]/bsize,1), dtype=A.dtype),eye(bsize)), 
              kron(ones((A.shape[0]/bsize,1), dtype=A.dtype),eye(bsize)),
              'B = kron(ones((A.shape[0]/A.blocksize[0],1), dtype=A.dtype), eye(A.blocksize[0])); BH = B.copy()'))
    
    ##
    # Default is to try V- and W-cycles
    if cycle_list is None:
        cycle_list = ['V', 'W']

    ##
    # Default strength of connection values
    if strength_list is None:
        strength_list = [('symmetric', {'theta' : 0.0}),
                         ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':2.0}),
                         ('evolution', {'k':2, 'proj_type':'l2', 'epsilon':4.0})]

    ##
    # Default aggregation strategies
    if aggregate_list is None:
        aggregate_list = ['standard']
    
    ##
    # Default prolongation smoothers
    if smooth_list is None:
        if definiteness == 'positive' and (symmetry=='hermitian' or symmetry=='symmetric'):
            if solver.__name__ == 'smoothed_aggregation_solver':
                smooth_list = ['jacobi', ('jacobi', {'filter' : True, 'weighting' : 'local'})]
            else:
                smooth_list = []
            ##
            smooth_list.append( ('energy',{'krylov':'cg','maxiter':2,'degree':1,'weighting':'local'}) )
            smooth_list.append( ('energy',{'krylov':'cg','maxiter':3,'degree':2,'weighting':'local'}) )
            smooth_list.append( ('energy',{'krylov':'cg','maxiter':4,'degree':3,'weighting':'local'}) )
        elif definiteness == 'indefinite' or symmetry=='nonsymmetric':
            smooth_list =[('energy',{'krylov':'gmres','maxiter':2,'degree':1,'weighting':'local'}),
                          ('energy',{'krylov':'gmres','maxiter':3,'degree':2,'weighting':'local'}),
                          ('energy',{'krylov':'gmres','maxiter':4,'degree':3,'weighting':'local'})]
        else:
            raise ValueError('invalid string for definiteness and/or symmetry')

    ##
    # Default pre- and postsmoothers
    if prepostsmoother_list is None:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            prepostsmoother_list = [ (('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2}),
                                      ('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2})) ]
        else:
            prepostsmoother_list= [ (('block_gauss_seidel',{'sweep':'symmetric','iterations':1}),
                                     ('block_gauss_seidel',{'sweep':'symmetric','iterations':1})) ]
    
    ##
    # Default Krylov wrapper
    if krylov_list is None:
        if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
            krylov_list = [('gmres', {'tol':1e-8, 'maxiter':300})]
        else:
            krylov_list = [('cg', {'tol':1e-8, 'maxiter':300})]

    ##
    # Default Bimprove
    if Bimprove_list is None:
        Bimprove_list = ['default', None]

    ##
    # Default basic solver parameters
    if max_levels_list is None:
        max_levels_list = [25]
    if coarse_size_list is None:
        coarse_size_list = [ (300, 'pinv') ]
   
    ##
    # Setup for ensuing numerical tests
    # The results array will hold in each row, three values: 
    # iterations, operator complexity, and work per digit of accuracy
    num_test = len(cycle_list)*len(strength_list)*len(aggregate_list)*len(smooth_list)* \
               len(krylov_list)*len(Bimprove_list)*len(max_levels_list)*len(B_list)* \
               len(coarse_size_list)*len(prepostsmoother_list)
    results = zeros( (num_test,3) )
    solver_descriptors = []
    solver_args = []
    
    ##
    # Zero RHS and random initial guess
    random.seed(0)
    b = zeros( (A.shape[0],1), dtype=A.dtype)
    x0 = rand( A.shape[0], 1)
    if A.dtype == complex:
        x0 += 1.0j*rand( A.shape[0], 1)

    ##
    # Begin loops over parameter choices
    print "    ..."
    counter = -1
    for cycle in cycle_list:
        for krylov in krylov_list:
            for max_levels in max_levels_list:
                for max_coarse,coarse_solver in coarse_size_list:
                    for presmoother,postsmoother in prepostsmoother_list:
                        for B_index in range(len(B_list)): 
                            for strength in strength_list:
                                for aggregate in aggregate_list:
                                    for smooth in smooth_list:
                                        for Bimprove in Bimprove_list:
                                            
                                            counter += 1
                                            print "    Test %d out of %d"%(counter+1,num_test)
                                            
                                            ##
                                            # Grab B vectors
                                            B,BH,Bdescriptor = B_list[B_index]
                                            
                                            ##
                                            # Store this solver setup
                                            if 'tol' in krylov[1]:
                                                tol = krylov[1]['tol']
                                            else:
                                                tol = 1e-6
                                            if 'maxiter' in krylov[1]:
                                                maxiter = krylov[1]['maxiter']
                                            else:
                                                maxiter = 300
                                            ##
                                            descriptor = '  Solve phase arguments:' + '\n' \
                                                '    cycle = ' + str(cycle) + '\n' \
                                                '    krylov accel = ' + str(krylov[0]) + '\n' \
                                                '    tol = ' + str(tol) + '\n' \
                                                '    maxiter = ' + str(maxiter)+'\n'\
                                                '  Setup phase arguments:' + '\n' \
                                                '    max_levels = ' + str(max_levels) + '\n' \
                                                '    max_coarse = ' + str(max_coarse) + '\n' \
                                                '    coarse_solver = ' + str(coarse_solver)+'\n'\
                                                '    presmoother = ' + str(presmoother) + '\n' \
                                                '    postsmoother = ' + str(postsmoother) + '\n'\
                                                '    ' + Bdescriptor + '\n' \
                                                '    strength = ' + str(strength) + '\n' \
                                                '    aggregate = ' + str(aggregate) + '\n' \
                                                '    smooth = ' + str(smooth) + '\n' \
                                                '    Bimprove = ' + str(Bimprove) 
                                            solver_descriptors.append(descriptor)
                                            solver_args.append( {'cycle' : cycle, 
                                                'accel' : str(krylov[0]),
                                                'tol' : tol, 'maxiter' : maxiter, 
                                                'max_levels' : max_levels, 'max_coarse' : max_coarse,
                                                'coarse_solver' : coarse_solver, 'B_index' : B_index,
                                                'presmoother' : presmoother, 
                                                'postsmoother' : postsmoother,
                                                'strength' : strength, 'aggregate' : aggregate,
                                                'smooth' : smooth, 'Bimprove' : Bimprove} )
                                            
                                            ##
                                            # Construct solver
                                            try:
                                                sa = solver(A, B=B, BH=BH,
                                                        strength=strength,
                                                        smooth=smooth,
                                                        Bimprove=Bimprove,
                                                        aggregate=aggregate,
                                                        presmoother=presmoother,
                                                        max_levels=max_levels,
                                                        postsmoother=postsmoother,
                                                        max_coarse=max_coarse,
                                                        coarse_solver=coarse_solver)
                                            
                                                ##
                                                # Solve system
                                                residuals = []
                                                x = sa.solve(b, x0=x0, accel=krylov[0], 
                                                  cycle=cycle, tol=tol, maxiter=maxiter, 
                                                  residuals=residuals)

                                                ##
                                                # Store results: iters, operator complexity, and
                                                # work per digit-of-accuracy
                                                results[counter,0] = len(residuals)
                                                results[counter,1] = sa.operator_complexity()
                                                resid_rate = (residuals[-1]/residuals[0])**\
                                                             (1.0/(len(residuals)-1.))
                                                results[counter,2] = sa.cycle_complexity()/ \
                                                                     abs(log10(resid_rate))

                                            except Exception:
                                                descriptor_indented = '      ' + \
                                                  descriptor.replace('\n', '\n      ')
                                                print("    --> Failed this test")
                                                print("    --> Solver descriptor is...")
                                                print(descriptor_indented)
                                                results[counter,:] = inf
    ##
    # Sort results and solver_descriptors according to work-per-doa
    indys = argsort(results[:,2])
    results = results[indys,:]
    solver_descriptors = list(array(solver_descriptors)[indys])
    solver_args = list(array(solver_args)[indys])

    ##
    # Create table from results and print to file
    table = [ ['solver #', 'iters', 'op complexity', 'work per DOA'] ]
    for i in range(results.shape[0]):
        if (results[i,:] == inf).all():
            # in this case the test failed...
            table.append(['%d'%(i+1), 'err', 'err', 'err'])
        else:
            table.append(['%d'%(i+1),'%d'%results[i,0],'%1.1f'%results[i,1],'%1.1f'%results[i,2]])
    #
    fptr = open(fname+'.txt', 'w')
    fptr.write('****************************************************************\n' + \
               '*                Begin Solver Diagnostic Results               *\n' + \
               '*                                                              *\n' + \
               '*        \'\'solver #\'\' refers to below solver descriptors       *\n' + \
               '*                                                              *\n' + \
               '*        \'\'iters\'\' refers to iterations taken                  *\n' + \
               '*                                                              *\n' + \
               '*        \'\'op complexity\'\' refers to operator complexity       *\n' + \
               '*                                                              *\n' + \
               '*        \'\'work per DOA\'\' refers to work per digit of          *\n' + \
               '*          accuracy to solve the algebraic system, i.e. it     *\n' + \
               '*          measures the overall efficiency of the solver       *\n' + \
               '****************************************************************\n\n')
    fptr.write(print_table(table))

    ##
    # Now print each solver descriptor to file
    fptr.write('\n****************************************************************\n' + \
                 '*                 Begin Solver Descriptors                     \n' + \
                 '*       Solver used is ' + solver.__name__ + '( )             \n' + \
                 '****************************************************************\n\n')

    for i in range(len(solver_descriptors)):
        fptr.write('Solver Descriptor %d\n'%(i+1))
        fptr.write(solver_descriptors[i])
        fptr.write(' \n \n')
    
    fptr.close()
    
    ##
    # Now write a function definition file that generates the "best" solver
    fptr = open(fname + '.py', 'w')
    # Helper function for file writing
    def to_string(a):
        # Stringify an argument for the generated script; quote bare strings
        if isinstance(a, str):
            return "\"%s\"" % a
        return str(a)
    #
    fptr.write('#######################################################################\n')
    fptr.write('# Function definition automatically generated by solver_diagnostics.py\n')
    fptr.write('#\n')
    fptr.write('# Use the function defined here to generate and run the best\n')
    fptr.write('# smoothed aggregation method found by solver_diagnostics(...).\n')
    fptr.write('# The only argument taken is a CSR/BSR matrix.\n')
    fptr.write('#\n')
    fptr.write('# To run:  >>> # User must load/generate CSR/BSR matrix A\n')
    fptr.write('#          >>> from ' + fname + ' import ' + fname + '\n' )
    fptr.write('#          >>> ' + fname + '(A)' + '\n')
    fptr.write('#######################################################################\n\n')
    fptr.write('from pyamg import ' + solver.__name__ + '\n')
    fptr.write('from pyamg.util.linalg import norm\n') 
    fptr.write('from numpy import ones, array, arange, zeros, abs, random, ravel, log10, kron, eye\n')
    fptr.write('from numpy.random import rand\n')
    fptr.write('from scipy.io import loadmat\n') 
    fptr.write('from scipy.sparse import isspmatrix_bsr, isspmatrix_csr\n') 
    fptr.write('import pylab\n\n')
    fptr.write('def ' + fname + '(A):\n') 
    fptr.write('    ##\n    # Generate B\n')
    # Use the B descriptor belonging to the best (first after sorting) solver
    fptr.write('    ' + B_list[solver_args[0]['B_index']][2] + '\n\n')
    fptr.write('    ##\n    # Random initial guess, zero right-hand side\n')
    fptr.write('    random.seed(0)\n')
    fptr.write('    b = zeros((A.shape[0],1))\n')
    fptr.write('    x0 = rand(A.shape[0],1)\n\n')
    fptr.write('    ##\n    # Create solver\n')
    fptr.write('    ml = ' + solver.__name__ + '(A, B=B, BH=BH,\n' + \
               '        strength=%s,\n'%to_string(solver_args[0]['strength']) + \
               '        smooth=%s,\n'%to_string(solver_args[0]['smooth']) + \
               '        Bimprove=%s,\n'%to_string(solver_args[0]['Bimprove']) + \
               '        aggregate=%s,\n'%to_string(solver_args[0]['aggregate']) + \
               '        presmoother=%s,\n'%to_string(solver_args[0]['presmoother']) + \
               '        postsmoother=%s,\n'%to_string(solver_args[0]['postsmoother']) + \
               '        max_levels=%s,\n'%to_string(solver_args[0]['max_levels']) + \
               '        max_coarse=%s,\n'%to_string(solver_args[0]['max_coarse']) + \
               '        coarse_solver=%s)\n\n'%to_string(solver_args[0]['coarse_solver']) ) 
    fptr.write('    ##\n    # Solve system\n')
    fptr.write('    res = []\n')
    fptr.write('    x = ml.solve(b, x0=x0, tol=%s, residuals=res, accel=%s, maxiter=%s, cycle=%s)\n'%\
              (to_string(solver_args[0]['tol']),
               to_string(solver_args[0]['accel']),
               to_string(solver_args[0]['maxiter']),
               to_string(solver_args[0]['cycle'])) ) 
    fptr.write('    res_rate = (res[-1]/res[0])**(1.0/(len(res)-1.))\n')
    fptr.write('    normr0 = norm(ravel(b) - ravel(A*x0))\n')
    fptr.write('    print " "\n')
    fptr.write('    print ml\n')
    fptr.write("    print \"System size:                \" + str(A.shape)\n")
    fptr.write("    print \"Avg. Resid Reduction:       %1.2f\"%res_rate\n")
    fptr.write("    print \"Iterations:                 %d\"%len(res)\n")
    fptr.write("    print \"Operator Complexity:        %1.2f\"%ml.operator_complexity()\n")
    fptr.write("    print \"Work per DOA:               %1.2f\"%(ml.cycle_complexity()/abs(log10(res_rate)))\n")
    fptr.write("    print \"Relative residual norm:     %1.2e\"%(norm(ravel(b) - ravel(A*x))/normr0)\n\n")
    fptr.write('    ##\n    # Plot residual history\n')
    fptr.write('    pylab.semilogy(array(res)/normr0)\n') 
    fptr.write('    pylab.title(\'Residual Histories\')\n')
    fptr.write('    pylab.xlabel(\'Iteration\')\n')
    fptr.write('    pylab.ylabel(\'Relative Residual Norm\')\n')
    fptr.write('    pylab.show()\n\n')
    # Close file pointer
    fptr.close()

    print "    ..."
    print "    --> Diagnostic Results located in " + fname + '.txt'
    print "    ..."
    print "    --> See automatically generated function definition\n" + \
          "        ./" + fname + ".py.\n\n" + \
          "        Use the function defined here to generate and run the best\n" + \
          "        smoothed aggregation method found.  The only argument taken\n" + \
          "        is a CSR/BSR matrix.\n\n" + \
          "        To run: >>> # User must load/generate CSR/BSR matrix A\n" + \
          "                >>> from " + fname + " import " + fname + "\n" + \
          "                >>> " + fname + "(A)"
Esempio n. 21
0
def classical_strength_of_connection(A, theta=0.25, block=None, norm='abs', cost=[0]):
    """
    Return a strength of connection matrix using the classical AMG measure.
    An off-diagonal entry A[i,j] is a strong connection iff::

            | A[i,j] | >= theta * max(| A[i,k] |), where k != i     (norm='abs')
             -A[i,j]   >= theta * max(| A[i,k] |), where k != i     (norm='min')

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format
    theta : float
        Threshold parameter in [0,1].
    block : string, default None for CSR matrix and 'block' for BSR matrix
        How to treat block structure of A:
            None         : Compute SOC based on A as CSR matrix.
            'block'      : Compute SOC based on norm of blocks of A.
            'amalgamate' : Compute SOC based on A as CSR matrix, then compute
                           norm of blocks in SOC matrix for a block SOC. 
    norm : string, default 'abs'
        Option to compute SOC between elements or blocks: 
            'abs'  : C_ij = k, where k is the maximum absolute value in block C_ij
            'min'  : C_ij = k, where k is the minimum (negative) value in block C_ij
            'fro'  : C_ij = k, where k is the Frobenius norm of block C_ij
                - Only valid for block matrices, block='block'

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    symmetric_strength_of_connection : symmetric measure used in SA
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
    - A symmetric A does not necessarily yield a symmetric strength matrix S
    - Calls C++ function classical_strength_of_connection

    References
    ----------

    .. [1] Briggs, W. L., Henson, V. E., McCormick, S. F., "A multigrid
       tutorial", Second edition. Society for Industrial and Applied
       Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
       ISBN: 0-89871-462-1

    .. [2] Trottenberg, U., Oosterlee, C. W., Schuller, A., "Multigrid",
       Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
       ISBN: 0-12-701070-X

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import classical_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = classical_strength_of_connection(A, 0.0)

    """
    if (theta < 0 or theta > 1):
        raise ValueError('expected theta in [0,1]')

    if sparse.isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
    else:
        blocksize = 1

    # Block structure considered before computing SOC.  BSR matrices default
    # to block='block'; an explicit block='amalgamate' falls through to the
    # CSR path below.
    if (block == 'block') or (block is None and sparse.isspmatrix_bsr(A)):
        R, C = A.blocksize
        if (R != C) or (R < 1):
            raise ValueError('Matrix must have square blocks')

        N = int(A.shape[0] / R)

        # SOC based on maximum absolute value element in each block
        if norm == 'abs':
            data = np.max(np.max(np.abs(A.data),axis=1),axis=1)
            cost[0] += 1
        # SOC based on hard minimum of entry in each off-diagonal block
        elif norm == 'min':
            data = np.min(np.min(A.data,axis=1),axis=1)
            cost[0] += 1
        # SOC based on the squared Frobenius norm of each block
        elif norm == 'fro':
            data = (np.conjugate(A.data) * A.data).reshape(-1, R*C).sum(axis=1)
            cost[0] += 1
        else:
            raise ValueError("Invalid choice of norm.")

        data[np.abs(data)<1e-16] = 0.0
        S_rowptr = np.empty_like(A.indptr)
        S_colinds = np.empty_like(A.indices)
        S_data = np.empty_like(data)

        if norm == 'abs' or norm == 'fro':
            amg_core.classical_strength_of_connection_abs(N, theta, A.indptr, A.indices, data,
                                                          S_rowptr, S_colinds, S_data)
        else:  # norm == 'min'; invalid norms were rejected above
            amg_core.classical_strength_of_connection_min(N, theta, A.indptr, A.indices, data,
                                                          S_rowptr, S_colinds, S_data)
    
        # One pass through nnz to find largest entry, one to filter
        S = sparse.csr_matrix((S_data, S_colinds, S_rowptr), shape=[N, N])
        cost[0] += 2
        
        # Take magnitude and scale by largest entry
        S.data = np.abs(S.data)
        S = scale_rows_by_largest_entry(S)
        S.eliminate_zeros()

        # Assume largest entry can be tracked from filtering.
        # 1 WU to scale matrix. 
        cost[0] += float(S.nnz) / A.nnz 

        return S

    # SOC computed based on A as CSR
    else:
        if blocksize > 1:
            # BSR input with block='amalgamate': compute SOC on scalar entries;
            # amalgamate() below restores the block structure
            A = A.tocsr()
        S_rowptr = np.empty_like(A.indptr)
        S_colinds = np.empty_like(A.indices)
        S_data = np.empty_like(A.data)

        if norm == 'abs' or norm == 'fro':
            amg_core.classical_strength_of_connection_abs(A.shape[0], theta, A.indptr,
                                                          A.indices, A.data, S_rowptr, S_colinds, S_data)
        elif norm == 'min':
            amg_core.classical_strength_of_connection_min(A.shape[0], theta, A.indptr,
                                                          A.indices, A.data, S_rowptr, S_colinds, S_data)
        else:  
            raise ValueError("Unrecognized option for norm.")

        # One pass through nnz to find largest entry, one to filter
        S = sparse.csr_matrix((S_data, S_colinds, S_rowptr), shape=A.shape)
        cost[0] += 2

        if blocksize > 1 and block == 'amalgamate':
            S = amalgamate(S, blocksize, norm=norm)

        # Take magnitude and scale by largest entry
        S.data = np.abs(S.data)
        S = scale_rows_by_largest_entry(S)
        S.eliminate_zeros()

        # Assume largest entry can be tracked from filtering.
        # 1 WU to scale matrix. 
        cost[0] += float(S.nnz) / A.nnz 

        return S
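# A short, hedged sketch of the block handling above: for a BSR matrix the
# SOC is computed between blocks, with `norm` selecting how each block is
# collapsed to a scalar ('abs', 'min', or 'fro').  Matrix size, blocksize,
# and theta are illustrative.
from pyamg.gallery import poisson
from pyamg.strength import classical_strength_of_connection

A = poisson((8, 8), format='csr').tobsr(blocksize=(2, 2))
S = classical_strength_of_connection(A, theta=0.25, norm='abs')
print(S.shape)  # (32, 32): one row/column per 2x2 block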
Esempio n. 22
0
def rootnode_solver(A, B=None, BH=None,
                    symmetry='hermitian', strength='symmetric',
                    aggregate='standard', smooth='energy',
                    presmoother=('block_gauss_seidel',
                                 {'sweep': 'symmetric'}),
                    postsmoother=('block_gauss_seidel',
                                  {'sweep': 'symmetric'}),
                    improve_candidates=('block_gauss_seidel',
                                        {'sweep': 'symmetric',
                                         'iterations': 4}),
                    max_levels=10, max_coarse=10,
                    diagonal_dominance=False, keep=False, **kwargs):
    """Create a multilevel solver using root-node based Smoothed Aggregation (SA).

    See the notes below, for the major differences with the classical-style
    smoothed aggregation solver in aggregation.smoothed_aggregation_solver.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Sparse NxN matrix in CSR or BSR format

    B : None, array_like
        Right near-nullspace candidates stored in the columns of an NxK array.
        K must be >= the blocksize of A (see reference [2011OlScTu]_). The default value
        B=None is equivalent to choosing the constant over each block-variable,
        B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))

    BH : None, array_like
        Left near-nullspace candidates stored in the columns of an NxK array.
        BH is only used if symmetry is 'nonsymmetric'.  K must be >= the
        blocksize of A (see reference [2011OlScTu]_). The default value B=None is
        equivalent to choosing the constant over each block-variable,
        B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))

    symmetry : string
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        'nonsymmetric' i.e. nonsymmetric in a hermitian sense
        Note that for the strictly real case, symmetric and hermitian are
        the same
        Note that this flag does not denote definiteness of the operator.

    strength : list
        Method used to determine the strength of connection between unknowns of
        the linear system.  Method-specific parameters may be passed in using a
        tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
        all nonzero entries of the matrix are considered strong.

    aggregate : list
        Method used to aggregate nodes.

    smooth : list
        Method used to smooth the tentative prolongator.  Method-specific
        parameters may be passed in using a tuple, e.g.  smooth=
        ('energy',{'krylov' : 'gmres'}).  Only 'energy' and None are valid
        prolongation smoothing options.

    presmoother : tuple, string, list
        Defines the presmoother for the multilevel cycling.  The default block
        Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
        is CSR or is a BSR matrix with blocksize of 1.  See notes below for
        varying this parameter on a per level basis.

    postsmoother : tuple, string, list
        Same as presmoother, except defines the postsmoother.

    improve_candidates : tuple, string, list
        The ith entry defines the method used to improve the candidates B on
        level i.  If the list is shorter than max_levels, then the last entry
        will define the method for all levels lower.  If tuple or string, then
        this single relaxation descriptor defines improve_candidates on all
        levels.
        The list elements are relaxation descriptors of the form used for
        presmoother and postsmoother.  A value of None implies no action on B.

    max_levels : integer
        Maximum number of levels to be used in the multilevel solver.

    max_coarse : integer
        Maximum number of variables permitted on the coarse grid.

    diagonal_dominance : bool, tuple
        If True (or the first tuple entry is True), then avoid coarsening
        diagonally dominant rows.  The second tuple entry requires a
        dictionary, where the key value 'theta' is used to tune the diagonal
        dominance threshold.

    keep : bool
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics.  For example, if True, then strength of connection (C),
        tentative prolongation (T), aggregation (AggOp), and arrays
        storing the C-points (Cpts) and F-points (Fpts) are kept at
        each level.

    Other Parameters
    ----------------
    cycle_type : ['V','W','F']
        Structure of the multigrid cycle
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.

    Returns
    -------
    ml : multilevel_solver
        Multigrid hierarchy of matrices and prolongation operators

    See Also
    --------
    multilevel_solver, aggregation.smoothed_aggregation_solver,
    classical.ruge_stuben_solver

    Notes
    -----
         - Root-node style SA differs from classical SA primarily by preserving
           an identity block in the interpolation operator, P.  Each aggregate
           has a "root-node" or "center-node" associated with it, and this
           root-node is injected from the coarse grid to the fine grid.  The
           injection corresponds to the identity block.

         - Only smooth={'energy', None} is supported for prolongation
           smoothing.  See reference [2011OlScTu]_ below for more details on why the
           'energy' prolongation smoother is the natural counterpart to
           root-node style SA.

         - The additional parameters are passed through as arguments to
           multilevel_solver.  Refer to pyamg.multilevel_solver for additional
           documentation.

         - At each level, four steps are executed in order to define the coarser
           level operator.

           1. Matrix A is given and used to derive a strength matrix, C.

           2. Based on the strength matrix, indices are grouped or aggregated.

           3. The aggregates define coarse nodes and a tentative prolongation
              operator T is defined by injection

           4. The tentative prolongation operator is smoothed by a relaxation
              scheme to improve the quality and extent of interpolation from the
              aggregates to fine nodes.

         - The parameters smooth, strength, aggregate, presmoother, postsmoother
           can be varied on a per level basis.  For different methods on
           different levels, use a list as input so that the i-th entry defines
           the method at the i-th level.  If there are more levels in the
           hierarchy than list entries, the last entry will define the method
           for all levels lower.

           Examples are:
           smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
           presmoother=[('block_gauss_seidel', {'sweep':'symmetric'}), 'sor']
           aggregate=['standard', 'naive']
           strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]

         - Predefined strength of connection and aggregation schemes can be
           specified.  These options are best used together, but aggregation can
           be predefined while strength of connection is not.

           For predefined strength of connection, use a list consisting of
           tuples of the form ('predefined', {'C' : C0}), where C0 is a
           csr_matrix and each degree-of-freedom in C0 represents a supernode.
           For instance to predefine a three-level hierarchy, use
           [('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].

           Similarly for predefined aggregation, use a list of tuples.  For
           instance to predefine a three-level hierarchy, use [('predefined',
           {'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
           dimensions of A, Agg0 and Agg1 are compatible, i.e.  Agg0.shape[1] ==
           A.shape[0] and Agg1.shape[1] == Agg0.shape[0].  Each AggOp is a
           csr_matrix.

           Because this is a root-nodes solver, if a member of the predefined
           aggregation list is predefined, it must be of the form
           ('predefined', {'AggOp' : Agg, 'Cnodes' : Cnodes}).

    Examples
    --------
    >>> from pyamg import rootnode_solver
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse.linalg import cg
    >>> import numpy as np
    >>> A = poisson((100, 100), format='csr')           # matrix
    >>> b = np.ones((A.shape[0]))                   # RHS
    >>> ml = rootnode_solver(A)                     # AMG solver
    >>> M = ml.aspreconditioner(cycle='V')             # preconditioner
    >>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M)   # solve with CG

    References
    ----------
    .. [1996VaMa] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html
    .. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
       "A general interpolation strategy for algebraic
       multigrid using energy minimization", SIAM Journal
       on Scientific Computing (SISC), vol. 33, pp.
       966--991, 2011.

    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR",
                 SparseEfficiencyWarning)
        except BaseException:
            raise TypeError('Argument A must have type csr_matrix, \
                             bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()

    if (symmetry != 'symmetric') and (symmetry != 'hermitian') and \
            (symmetry != 'nonsymmetric'):
        raise ValueError('expected \'symmetric\', \'nonsymmetric\' \
                          or \'hermitian\' for the symmetry parameter ')
    A.symmetry = symmetry

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')
    # Right near nullspace candidates use constant for each variable as default
    if B is None:
        B = np.kron(np.ones((int(A.shape[0]/blocksize(A)), 1), dtype=A.dtype),
                    np.eye(blocksize(A)))
    else:
        B = np.asarray(B, dtype=A.dtype)
        if len(B.shape) == 1:
            B = B.reshape(-1, 1)
        if B.shape[0] != A.shape[0]:
            raise ValueError('The near null-space modes B have incorrect \
                              dimensions for matrix A')
        if B.shape[1] < blocksize(A):
            raise ValueError('B.shape[1] must be >= the blocksize of A')

    # Left near nullspace candidates
    if A.symmetry == 'nonsymmetric':
        if BH is None:
            BH = B.copy()
        else:
            BH = np.asarray(BH, dtype=A.dtype)
            if len(BH.shape) == 1:
                BH = BH.reshape(-1, 1)
            if BH.shape[1] != B.shape[1]:
                raise ValueError('The number of left and right near \
                                  null-space modes B and BH, must be equal')
            if BH.shape[0] != A.shape[0]:
                raise ValueError('The near null-space modes BH have \
                                  incorrect dimensions for matrix A')

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    improve_candidates =\
        levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Construct multilevel structure
    levels = []
    levels.append(multilevel_solver.level())
    levels[-1].A = A          # matrix

    # Append near nullspace candidates
    levels[-1].B = B          # right candidates
    if A.symmetry == 'nonsymmetric':
        levels[-1].BH = BH    # left candidates

    while len(levels) < max_levels and \
            int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:
        extend_hierarchy(levels, strength, aggregate, smooth,
                         improve_candidates, diagonal_dominance, keep)

    ml = multilevel_solver(levels, **kwargs)
    change_smoothers(ml, presmoother, postsmoother)
    return ml
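# A hedged sketch of the per-level parameter lists described in the Notes
# above: the i-th list entry applies on level i, and the last entry carries
# down to all remaining levels.  All parameter choices are illustrative.
import numpy as np
from pyamg import rootnode_solver
from pyamg.gallery import poisson

A = poisson((60, 60), format='csr')
ml = rootnode_solver(A,
                     strength=[('symmetric', {'theta': 0.25}),   # level 0
                               ('symmetric', {'theta': 0.08})],  # levels 1+
                     aggregate=['standard', 'naive'],
                     smooth=('energy', {'krylov': 'cg', 'maxiter': 3}))
b = np.ones(A.shape[0])
x = ml.solve(b, tol=1e-8)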
Esempio n. 23
0
def blocksize(A):
    # Helper function: return the blocksize of a matrix (1 if not BSR)
    if isspmatrix_bsr(A):
        return A.blocksize[0]
    else:
        return 1
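# Usage sketch for the helper above (assumes isspmatrix_bsr has been imported
# from scipy.sparse):
from scipy.sparse import identity

print(blocksize(identity(4, format='csr')))                          # 1
print(blocksize(identity(4, format='csr').tobsr(blocksize=(2, 2))))  # 2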
Esempio n. 24
0
def extend_hierarchy(levels, strength, CF, interp, restrict, filter_operator,
                     coarse_grid_P, coarse_grid_R, keep):
    """ helper function for local methods """

    # Filter operator. Need to keep the original matrix on the finest level
    # for computing residuals
    if (filter_operator is not None) and (filter_operator[1] != 0): 
        if len(levels) == 1:
            A = deepcopy(levels[-1].A)
        else:
            A = levels[-1].A
        filter_matrix_rows(A, filter_operator[1], diagonal=True, lump=filter_operator[0])
    else:
        A = levels[-1].A

    # Check if matrix was filtered to be diagonal --> coarsest grid
    if A.nnz == A.shape[0]:
        return 1

    # Zero initial complexities for strength, splitting and interpolation
    levels[-1].complexity['CF'] = 0.0
    levels[-1].complexity['strength'] = 0.0
    levels[-1].complexity['interpolate'] = 0.0

    # Compute the strength-of-connection matrix C, where larger
    # C[i,j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))
    levels[-1].complexity['strength'] += kwargs['cost'][0] * A.nnz / float(A.nnz)

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    elif fn == 'weighted_matching':
        splitting, soc = weighted_matching(C, **kwargs)
        if soc is not None:
            C = soc
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)
    levels[-1].complexity['CF'] += kwargs['cost'][0] * C.nnz / float(A.nnz)
    temp = np.sum(splitting)
    if (temp == len(splitting)) or (temp == 0):
        return 1

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    r_flag = False
    fn, kwargs = unpack_arg(interp)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'scaledAfc':
        P = scaled_Afc_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A):
            temp_A = bsr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = bsr_matrix(P.T)
        else:
            temp_A = csr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = csr_matrix(P.T)
    elif fn == 'restrict':
        r_flag = True
    else:
        raise ValueError('unknown interpolation method (%s)' % interp)
    levels[-1].complexity['interpolate'] += kwargs['cost'][0] * A.nnz / float(A.nnz)

    # Build restriction operator
    fn, kwargs = unpack_arg(restrict)
    if fn is None:
        R = P.T
    elif fn == 'air':
        R = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':         # Don't need A^T here
        temp_C = C.T.tocsr()
        R = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'inject':            # Don't need A^T or C^T here
        R = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()        
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    else:
        raise ValueError('unknown restriction method (%s)' % restrict)

    # If interp == 'restrict' was requested, set P = R^T
    if r_flag:
        P = R.T

    # Optional different interpolation for RAP
    fn, kwargs = unpack_arg(coarse_grid_P)
    if fn == 'standard':
        P_temp = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P_temp = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P_temp = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P_temp = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P_temp = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P_temp = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A): 
            temp_A = bsr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = bsr_matrix(P_temp.T)
        else:
            temp_A = csr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = csr_matrix(P_temp.T)
    else:
        P_temp = P

    # Optional different restriction for RAP
    fn, kwargs = unpack_arg(coarse_grid_R)
    if fn == 'air':
        R_temp = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R_temp = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':         # Don't need A^T here
        temp_C = C.T.tocsr()
        R_temp = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'inject':            # Don't need A^T or C^T here
        R_temp = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()        
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    else:
        R_temp = R

    # Store relevant information for this level
    if keep:
        levels[-1].C = C              # strength of connection matrix

    levels[-1].P = P                  # prolongation operator
    levels[-1].R = R                  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    # RAP is computed as R*(A*P)
    levels[-1].complexity['RAP'] = mat_mat_complexity(A, P_temp) / float(A.nnz)
    AP = A * P_temp
    levels[-1].complexity['RAP'] += mat_mat_complexity(R_temp, AP) / float(A.nnz)
    A = R_temp * AP
    

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    A.eliminate_zeros()
    levels.append(multilevel_solver.level())
    levels[-1].A = A
    return 0
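
The restriction branches above all follow one pattern: build an interpolation operator for the transposed problem, then transpose it back into a restriction operator. A minimal sketch of that pattern, assuming standard_interpolation (defined in a later example) and a C/F splitting are in scope:

def restriction_via_transpose(A, C, splitting):
    # Interpolate on A^T (and C^T), then transpose: R = P(A^T)^T.
    temp_A = A.T.tocsr()
    temp_C = C.T.tocsr()
    R = standard_interpolation(temp_A, temp_C, splitting)
    return R.T.tocsr()
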
Example n. 25
def solver_configuration(A, B=None, verb=True):
    """
    Given an arbitrary matrix A, generate a dictionary of parameters with
    which to generate a smoothed_aggregation_solver.

    Parameters
    ----------
    A : {array, matrix, csr_matrix, bsr_matrix}
        (n x n) matrix to invert, CSR or BSR format preferred for efficiency
    B : {None, array}
        Near null-space modes used to construct the smoothed aggregation solver
        If None, the constant vector is used
        If (n x m) array, then B is passed to smoothed_aggregation_solver
    verb : {bool}
        If True, print verbose output during runtime

    Returns
    -------
    config : {dict}
        A dictionary of solver configuration parameters that one uses to
        generate a smoothed aggregation solver

    Notes
    -----
    The config dictionary contains the following parameter entries:
        symmetry, smooth, presmoother, postsmoother, B, strength,
        max_levels, max_coarse, coarse_solver, aggregate, keep
    See smoothed_aggregation_solver for each parameter's description.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import solver_configuration
    >>> A = poisson((40,40),format='csr')
    >>> solver_config = solver_configuration(A,verb=False)
    """

    # Ensure acceptable format of A
    A = make_csr(A)
    config = {}

    # Detect symmetry
    if ishermitian(A, fast_check=True):
        config["symmetry"] = "hermitian"
        if verb:
            print("  Detected a Hermitian matrix")
    else:
        config["symmetry"] = "nonsymmetric"
        if verb:
            print("  Detected a non-Hermitian matrix")

    # Symmetry dependent parameters
    if config["symmetry"] == "hermitian":
        config["smooth"] = ("energy", {"krylov": "cg", "maxiter": 3, "degree": 2, "weighting": "local"})
        config["presmoother"] = ("block_gauss_seidel", {"sweep": "symmetric", "iterations": 1})
        config["postsmoother"] = ("block_gauss_seidel", {"sweep": "symmetric", "iterations": 1})
    else:
        config["smooth"] = ("energy", {"krylov": "gmres", "maxiter": 3, "degree": 2, "weighting": "local"})
        config["presmoother"] = ("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 2})
        config["postsmoother"] = ("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 2})

    # Determine near null-space modes B
    if B is None:
        # B is the constant for each variable in a node
        if isspmatrix_bsr(A) and A.blocksize[0] > 1:
            bsize = A.blocksize[0]
            config["B"] = np.kron(np.ones((int(A.shape[0] / bsize), 1), dtype=A.dtype), np.eye(bsize))
        else:
            config["B"] = np.ones((A.shape[0], 1), dtype=A.dtype)
    elif isinstance(B, (np.ndarray, np.matrix)):
        if len(B.shape) == 1:
            B = B.reshape(-1, 1)
        if (B.shape[0] != A.shape[0]) or (B.shape[1] == 0):
            raise TypeError("Invalid dimensions of B: B.shape[0] must "
                            "equal A.shape[0]")
        else:
            config["B"] = np.array(B, dtype=A.dtype)
    else:
        raise TypeError("Invalid B")

    if config["symmetry"] == "hermitian":
        config["BH"] = None
    else:
        config["BH"] = config["B"].copy()

    # Set non-symmetry related parameters
    config["strength"] = ("evolution", {"k": 2, "proj_type": "l2", "epsilon": 3.0})
    config["max_levels"] = 15
    config["max_coarse"] = 500
    config["coarse_solver"] = "pinv"
    config["aggregate"] = "standard"
    config["keep"] = False

    return config
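
A hedged usage sketch for the function above: the returned dictionary can be fed to pyamg's blackbox solver (treat the exact import path of solver as an assumption for your pyamg version):

import numpy as np
from pyamg import solver_configuration
from pyamg.blackbox import solver
from pyamg.gallery import poisson

A = poisson((40, 40), format='csr')
config = solver_configuration(A, verb=False)
ml = solver(A, config)                    # build SA solver from the config
b = np.random.rand(A.shape[0])
x = ml.solve(b, tol=1e-8)
print(np.linalg.norm(b - A @ x))          # residual norm
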
Example n. 26
def extend_hierarchy(levels, strength, CF, interpolation, restriction, keep):
    """ helper function for local methods """

    A = levels[-1].A

    # Compute the strength-of-connection matrix C, where larger
    # C[i,j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    levels[-1].complexity['strength'] = kwargs['cost'][0]

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = split.RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = split.PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = split.PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = split.CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = split.CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)

    levels[-1].complexity['CF'] = kwargs['cost'][0]

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    fn, kwargs = unpack_arg(interpolation)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'injection':
        P = injection_interpolation(A, splitting, **kwargs)
    else:
        raise ValueError('unknown interpolation method (%s)' % interpolation)
    levels[-1].complexity['interpolate'] = kwargs['cost'][0]

    # Generate the restriction matrix that maps from the fine-grid to the
    # coarse-grid. Must make sure transpose matrices remain in CSR or BSR
    fn, kwargs = unpack_arg(restriction)
    if isspmatrix_csr(A):
        if restriction == 'galerkin':
            R = P.T.tocsr()
        elif fn == 'standard':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'distance_two':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'direct':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'one_point':         # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'injection':         # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tocsr()
        else:
            raise ValueError('unknown interpolation method (%s)' % interpolation)
    else: 
        if restriction == 'galerkin':
            R = P.T.tobsr()
        elif fn == 'standard':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'distance_two':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'direct':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'one_point':         # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'injection':         # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            raise ValueError('unknown interpolation method (%s)' % interpolation)
    
    levels[-1].complexity['restriction'] = kwargs['cost'][0]

    # Store relevant information for this level
    if keep:
        levels[-1].C = C                  # strength of connection matrix

    levels[-1].P = P                  # prolongation operator
    levels[-1].R = R                  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R,A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA,P) / float(A.nnz)
    A = RA * P      # Galerkin operator, Ac = RAP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    # Form next level through Galerkin product
    levels.append(multilevel_solver.level())
    levels[-1].A = A
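
The coarse-grid operator above is the Galerkin triple product Ac = R*A*P, followed by format bookkeeping so Ac matches P. A standalone sketch of that step (assuming P and R are scipy CSR or BSR matrices):

import scipy.sparse as sparse

def galerkin_product(R, A, P):
    # Form Ac = (R*A)*P and coerce it to the sparse format of P.
    Ac = (R @ A) @ P
    if sparse.isspmatrix_csr(P) and not sparse.isspmatrix_csr(Ac):
        Ac = Ac.tocsr()
    elif sparse.isspmatrix_bsr(P) and not sparse.isspmatrix_bsr(Ac):
        Ac = Ac.tobsr(blocksize=P.blocksize)
    Ac.eliminate_zeros()
    return Ac
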
Example n. 27
def one_point_interpolation(A, C, splitting, by_val=False, cost=[0]):
    """ Create one-point interpolation operator, that is C-points are
    interpolated by value and F-points are interpolated by value from
    their strongest-connected C-point neighbor.

    Parameters
    ----------
    A : {csr_matrix}
        NxN matrix in CSR format
    C : {csr_matrix}
        Strength-of-Connection matrix (does not need zero diagonal)
    by_val : bool
        For CSR matrices only right now, use values of -Afc in interp as an
        approximation to P_ideal. If false, F-points are interpolated by value
        with weight 1.
    splitting : array
        C/F splitting stored in an array of length N

    Returns
    -------
    NxNc interpolation operator, P
    """
    if isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
        n = A.shape[0] // blocksize
    elif isspmatrix_csr(A):
        n = A.shape[0]
        blocksize = 1
    else:
        try:
            A = A.tocsr()
            warn("Implicit conversion of A to csr", SparseEfficiencyWarning)
            n = A.shape[0]
            blocksize = 1
        except Exception:
            raise TypeError("Invalid matrix type, must be CSR or BSR.")

    nc = np.sum(splitting)
    P_rowptr = np.empty((n + 1, ), dtype='int32')  # P: n x nc, at most 'n' nnz
    P_colinds = np.empty((n, ), dtype='int32')
    P_data = np.empty((n, ), dtype=A.dtype)

    if blocksize == 1:
        if by_val:
            amg_core.one_point_interpolation(P_rowptr, P_colinds, P_data,
                                             A.indptr, A.indices, A.data,
                                             splitting)
            return csr_matrix((P_data, P_colinds, P_rowptr), shape=[n, nc])
        else:
            amg_core.one_point_interpolation(P_rowptr, P_colinds, P_data,
                                             C.indptr, C.indices, C.data,
                                             splitting)
            P_data = np.ones((n, ), dtype=A.dtype)
            return csr_matrix((P_data, P_colinds, P_rowptr), shape=[n, nc])
    else:
        amg_core.one_point_interpolation(P_rowptr, P_colinds, P_data, C.indptr,
                                         C.indices, C.data, splitting)
        P_data = np.array(n * [np.identity(blocksize, dtype=A.dtype)],
                          dtype=A.dtype)
        return bsr_matrix((P_data, P_colinds, P_rowptr),
                          blocksize=[blocksize, blocksize],
                          shape=[blocksize * n, blocksize * nc])
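
A hedged usage sketch for one_point_interpolation: with by_val=False, each F-point row of P holds a single 1 in the column of its strongest C-neighbor. Using A itself as the SOC matrix is purely for illustration:

import numpy as np
from pyamg.gallery import poisson
from pyamg.classical import split

A = poisson((7,), format='csr')
splitting = split.RS(A)                   # C/F splitting from RS coarsening
P = one_point_interpolation(A, A, splitting)
print(P.toarray())                        # exactly one nonzero per row
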
Example n. 28
def smoothed_aggregation_helmholtz_solver(A, planewaves, use_constant=(True, {'last_level': 0}),
        symmetry='symmetric', strength='symmetric', aggregate='standard',
        smooth=('energy', {'krylov': 'gmres'}),
        presmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
        postsmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
        improve_candidates='default', max_levels=10, max_coarse=100, **kwargs):
    
    """
    Create a multilevel solver using Smoothed Aggregation (SA) for a 2D Helmholtz operator

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix in CSR or BSR format
    planewaves : { list }
        [pw_0, pw_1, ..., pw_n], where the k-th tuple pw_k is of the form (fn,
        args).  fn is a callable and args is a dictionary of arguments for fn.
        This k-th tuple is used to define any new planewaves (i.e., new coarse
        grid basis functions) to be appended to the existing B_k at that level. 
            The function fn must return functions defined on the finest level, 
        i.e., a collection of vector(s) of length A.shape[0].  These vectors
        are then restricted to the appropriate level, where they enrich the 
        coarse space.
            Instead of a tuple, None can be used to stipulate no introduction
        of planewaves at that level.  If len(planewaves) < max_levels, the 
        last entry is used to define coarser level planewaves.
    use_constant : {tuple}
        Tuple of the form (bool, {'last_level':int}).  The boolean denotes 
        whether to introduce the constant in B at level 0.  'last_level' denotes
        the final level to use the constant in B.  That is, if 'last_level' is 1,
        then the vector in B corresponding to the constant on level 0 is dropped 
        from B at level 2.
            This is important, because using constant based interpolation beyond
        the Nyquist rate will result in poor solver performance.
    symmetry : {string}
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        'nonsymmetric' i.e. nonsymmetric in a hermitian sense
        Note that for the strictly real case, symmetric and hermitian are the same
        Note that this flag does not denote definiteness of the operator.
    strength : ['symmetric', 'classical', 'evolution', ('predefined', {'C' : csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system.  Method-specific parameters may be passed in using a
        tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
        all nonzero entries of the matrix are considered strong.  
            See notes below for varying this parameter on a per level basis.  Also,
        see notes below for using a predefined strength matrix on each level.
    aggregate : ['standard', 'lloyd', 'naive', ('predefined', {'AggOp' : csr_matrix})]
        Method used to aggregate nodes.  See notes below for varying this
        parameter on a per level basis.  Also, see notes below for using a
        predefined aggregation on each level.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used to smooth the tentative prolongator.  Method-specific
        parameters may be passed in using a tuple, e.g.  smooth=
        ('jacobi',{'filter' : True }).  See notes below for varying this
        parameter on a per level basis.
    presmoother : {tuple, string, list} : default ('gauss_seidel_nr', {'sweep':'symmetric'})
        Defines the presmoother for the multilevel cycling.  The default block
        Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
        is CSR or is a BSR matrix with blocksize of 1.  See notes below for
        varying this parameter on a per level basis.
    postsmoother : {tuple, string, list}
        Same as presmoother, except defines the postsmoother.
    improve_candidates : {list} : default [('block_gauss_seidel', {'sweep':'symmetric'}), None]
        The ith entry defines the method used to improve the candidates B on
        level i.  If the list is shorter than max_levels, then the last entry
        will define the method for all levels lower.
            The list elements are relaxation descriptors of the form used for
        presmoother and postsmoother.  A value of None implies no action on B.
    max_levels : {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : {integer} : default 100
        Maximum number of variables permitted on the coarse grid.

    Other Parameters
    ----------------
    coarse_solver : ['splu','lu', ... ]
        Solver used at the coarsest level of the MG hierarchy 

    Returns
    -------
    ml : multilevel_solver
        Multigrid hierarchy of matrices and prolongation operators

    See Also
    --------
    multilevel_solver, smoothed_aggregation_solver

    Notes
    -----
    - The additional parameters are passed through as arguments to
      multilevel_solver.  Refer to pyamg.multilevel_solver for additional
      documentation.

    - The parameters smooth, strength, aggregate, presmoother, postsmoother can
      be varied on a per level basis.  For different methods on different
      levels, use a list as input so that the ith entry defines the method at
      the ith level.  If there are more levels in the hierarchy than list
      entries, the last entry will define the method for all levels lower.
      
      Examples are:
      smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
      presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
      aggregate=['standard', 'naive']
      strength=[('symmetric', {'theta':0.25}), ('symmetric',{'theta':0.08})]

    - Predefined strength of connection and aggregation schemes can be
      specified.  These options are best used together, but aggregation can be
      predefined while strength of connection is not.

      For predefined strength of connection, use a list consisting of tuples of
      the form ('predefined', {'C' : C0}), where C0 is a csr_matrix and each
      degree-of-freedom in C0 represents a supernode.  For instance to
      predefine a three-level hierarchy, use [('predefined', {'C' : C0}),
      ('predefined', {'C' : C1}) ].
      
      Similarly for predefined aggregation, use a list of tuples.  For instance
      to predefine a three-level hierarchy, use [('predefined', {'AggOp' :
      Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the dimensions of A,
      Agg0 and Agg1 are compatible, i.e.  Agg0.shape[1] == A.shape[0] and
      Agg1.shape[1] == Agg0.shape[0].  Each AggOp is a csr_matrix.

    Examples
    --------
    >>> from pyamg import smoothed_aggregation_solver
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse.linalg import cg
    >>> import numpy as np
    >>> A = poisson((100,100), format='csr')           # matrix
    >>> b = np.random.rand(A.shape[0])                 # random RHS
    >>> ml = smoothed_aggregation_solver(A)            # AMG solver
    >>> M = ml.aspreconditioner(cycle='V')             # preconditioner
    >>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M)   # solve with CG

    References
    ----------
    .. [1] L. N. Olson and J. B. Schroder. Smoothed Aggregation for Helmholtz
    Problems. Numerical Linear Algebra with Applications.  pp. 361--386.  17
    (2010).

    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        raise TypeError('argument A must have type csr_matrix or bsr_matrix')

    A = A.asfptype()
    
    if (symmetry != 'symmetric') and (symmetry != 'hermitian') and (symmetry != 'nonsymmetric'):
        raise ValueError('expected \'symmetric\', \'nonsymmetric\' or \'hermitian\' for the symmetry parameter ')
    A.symmetry = symmetry

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')
    
    ##
    # Preprocess and extend planewaves to length max_levels
    planewaves = preprocess_planewaves(planewaves, max_levels)
    # Check that the user has defined functions for B at each level
    use_const, args = unpack_arg(use_constant)
    first_planewave_level = -1
    for pw in planewaves:
        first_planewave_level += 1
        if pw is not None:
            break
    ##    
    if (not use_const) and (planewaves[0] is None):
        raise ValueError('No functions defined for B on the finest level, ' + \
              'either use_constant must be true, or planewaves must be defined for level 0')
    elif use_const and (args['last_level'] < first_planewave_level-1):
        raise ValueError('Some levels have no function(s) defined for B.  ' + \
                         'Change use_constant and/or planewave arguments.')
        
    ##
    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    improve_candidates = levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)


    ##
    # Start first level
    levels = []
    levels.append( multilevel_solver.level() )
    levels[-1].A = A                            # matrix
    levels[-1].B = numpy.zeros((A.shape[0],0))  # place-holder for near-nullspace candidates

    zeros_0 = numpy.zeros((levels[0].A.shape[0],), dtype=A.dtype)
    while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
        A = levels[0].A
        A_l = levels[-1].A
        zeros_l = numpy.zeros((levels[-1].A.shape[0],), dtype=A.dtype)

        ##
        # Generate additions to n-th level candidates
        if planewaves[len(levels)-1] is not None:
            fn, args = unpack_arg(planewaves[len(levels)-1])
            Bcoarse2 = numpy.array(fn(**args))

            ##
            # As in alpha-SA, relax the candidates before restriction
            if improve_candidates[0] is not None:
                Bcoarse2 = relaxation_as_linear_operator(improve_candidates[0], A, zeros_0)*Bcoarse2
            
            ##
            # Restrict Bcoarse2 to current level
            for i in range(len(levels)-1):
                Bcoarse2 = levels[i].R*Bcoarse2
            # relax after restriction
            if improve_candidates[len(levels)-1] is not None:
                Bcoarse2 = relaxation_as_linear_operator(improve_candidates[len(levels)-1], A_l, zeros_l) * Bcoarse2
        else:
            Bcoarse2 = numpy.zeros((A_l.shape[0],0),dtype=A.dtype)

        ##
        # Deal with the use of constant in interpolation
        use_const, args = unpack_arg(use_constant)
        if use_const and len(levels) == 1:
            # If level 0, and the constant is to be used in interpolation
            levels[0].B = numpy.hstack( (numpy.ones((A.shape[0],1), dtype=A.dtype), Bcoarse2) )
        elif use_const and args['last_level'] == len(levels)-2: 
            # If the previous level was the last level to use the constant, then remove the
            # coarse grid function based on the constant from B
            levels[-1].B = numpy.hstack( (levels[-1].B[:,1:], Bcoarse2) )
        else:
            levels[-1].B = numpy.hstack((levels[-1].B, Bcoarse2))
        
        ##
        # Create and Append new level
        extend_hierarchy(levels, strength, aggregate, smooth, [None for i in range(max_levels)], keep=True)
    
    ml = multilevel_solver(levels, **kwargs)
    change_smoothers(ml, presmoother, postsmoother)
    return ml
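
A hedged usage sketch. The shifted Laplacian below is only a stand-in for a real Helmholtz discretization, and planewaves_2d is a hypothetical generator written for this illustration; neither is part of the solver above:

import numpy as np
from scipy.sparse import eye
from pyamg.gallery import poisson

def planewaves_2d(X, Y, omega=1.0, angles=(0.0, np.pi / 2)):
    # Columns are exp(i*omega*(x*cos(t) + y*sin(t))) for each angle t.
    return np.array([np.exp(1.0j * omega * (X * np.cos(t) + Y * np.sin(t))).ravel()
                     for t in angles]).T

n, omega = 32, 2.0
A = (poisson((n, n), format='csr') - omega**2 * eye(n * n)).tocsr().astype(complex)
X, Y = np.meshgrid(np.arange(n) / n, np.arange(n) / n)
pw = [(planewaves_2d, {'X': X, 'Y': Y, 'omega': omega})]
ml = smoothed_aggregation_helmholtz_solver(A, planewaves=pw)
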
Example n. 29
def local_AIR(A,
              splitting,
              theta=0.1,
              norm='abs',
              degree=1,
              use_gmres=False,
              maxiter=10,
              precondition=True,
              cost=[0]):
    """ Compute approximate ideal restriction by setting RA = 0, within the
    sparsity pattern of R. Sparsity pattern of R for the ith row (i.e. ith
    C-point) is the set of all strongly connected F-points, or the max_row
    *most* strongly connected F-points.

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        NxN matrix in CSR or BSR format
    splitting : array
        C/F splitting stored in an array of length N
    theta : float, default 0.1
        Solve the local system for each row of R using all entries j with
            |A_ij| >= theta * max_{k != i} |A_ik|
    degree : int, default 1
        Expand sparsity pattern for R by considering strongly connected
        neighbors within 'degree' of a given node. Only supports degree 1 and 2.
    use_gmres : bool
        Solve local linear system for each row of R using GMRES
    maxiter : int
        Maximum number of GMRES iterations
    precondition : bool
        Diagonally precondition GMRES

    Returns
    -------
    Approximate ideal restriction, R, in same sparse format as A.

    Notes
    -----
    - This was the original idea for approximating ideal restriction. In practice,
      however, a Neumann approximation is typically used.
    - Supports block bsr matrices as well.
    """

    # Get SOC matrix containing neighborhood to be included in local solve
    if isspmatrix_bsr(A):
        C = classical_strength_of_connection(A=A,
                                             theta=theta,
                                             block='amalgamate',
                                             norm=norm)
        blocksize = A.blocksize[0]
    elif isspmatrix_csr(A):
        blocksize = 1
        C = classical_strength_of_connection(A=A,
                                             theta=theta,
                                             block=None,
                                             norm=norm)
    else:
        try:
            A = A.tocsr()
            warn("Implicit conversion of A to csr", SparseEfficiencyWarning)
            C = classical_strength_of_connection(A=A,
                                                 theta=theta,
                                                 block=None,
                                                 norm=norm)
            blocksize = 1
        except Exception:
            raise TypeError("Invalid matrix type, must be CSR or BSR.")

    Cpts = np.array(np.where(splitting == 1)[0], dtype='int32')
    nc = Cpts.shape[0]
    n = C.shape[0]

    R_rowptr = np.empty(nc + 1, dtype='int32')
    amg_core.approx_ideal_restriction_pass1(R_rowptr, C.indptr, C.indices,
                                            Cpts, splitting, degree)

    # Build restriction operator
    nnz = R_rowptr[-1]
    R_colinds = np.zeros(nnz, dtype='int32')

    # Block matrix
    if isspmatrix_bsr(A):
        R_data = np.zeros(nnz * blocksize * blocksize, dtype=A.dtype)
        amg_core.block_approx_ideal_restriction_pass2(
            R_rowptr, R_colinds, R_data, A.indptr, A.indices, A.data.ravel(),
            C.indptr, C.indices, C.data, Cpts, splitting, blocksize, degree,
            use_gmres, maxiter, precondition)
        R = bsr_matrix(
            (R_data.reshape(nnz, blocksize, blocksize), R_colinds, R_rowptr),
            blocksize=[blocksize, blocksize],
            shape=[nc * blocksize, A.shape[0]])
    # Not block matrix
    else:
        R_data = np.zeros(nnz, dtype=A.dtype)
        amg_core.approx_ideal_restriction_pass2(R_rowptr, R_colinds, R_data,
                                                A.indptr, A.indices, A.data,
                                                C.indptr, C.indices, C.data,
                                                Cpts, splitting, degree,
                                                use_gmres, maxiter,
                                                precondition)
        R = csr_matrix((R_data, R_colinds, R_rowptr), shape=[nc, A.shape[0]])

    R.eliminate_zeros()
    return R
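
A hedged usage sketch: local_AIR applied to a 1D upwind advection matrix, with an every-other-point splitting chosen only for illustration:

import numpy as np
import scipy.sparse as sparse

n = 8
A = sparse.diags([np.ones(n), -np.ones(n - 1)], [0, -1], format='csr')
splitting = np.array([1, 0] * (n // 2), dtype='int32')
R = local_AIR(A, splitting, theta=0.1, degree=1)
print(R.shape)                            # (n/2, n): one row per C-point
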
Example n. 30
def solver_configuration(A, B=None, verb=True):
    """
    Given an arbitrary matrix A, generate a dictionary of parameters with
    which to generate a smoothed_aggregation_solver.

    Parameters
    ----------
    A : {array, matrix, csr_matrix, bsr_matrix}
        (n x n) matrix to invert, CSR or BSR format preferred for efficiency
    B : {None, array}
        Near null-space modes used to construct the smoothed aggregation solver
        If None, the constant vector is used
        If (n x m) array, then B is passed to smoothed_aggregation_solver
    verb : {bool}
        If True, print verbose output during runtime

    Returns
    -------
    config : {dict}
        A dictionary of solver configuration parameters that one uses to
        generate a smoothed aggregation solver

    Notes
    -----
    The config dictionary contains the following parameter entries:
        symmetry, smooth, presmoother, postsmoother, B, strength,
        max_levels, max_coarse, coarse_solver, aggregate, keep
    See smoothed_aggregation_solver for each parameter's description.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import solver_configuration
    >>> A = poisson((40,40),format='csr')
    >>> solver_config = solver_configuration(A,verb=False)
    """

    # Ensure acceptable format of A
    A = make_csr(A)
    config = {}

    # Detect symmetry
    if ishermitian(A, fast_check=True):
        config['symmetry'] = 'hermitian'
        if verb:
            print("  Detected a Hermitian matrix")
    else:
        config['symmetry'] = 'nonsymmetric'
        if verb:
            print("  Detected a non-Hermitian matrix")

    # Symmetry dependent parameters
    if config['symmetry'] == 'hermitian':
        config['smooth'] = ('energy', {
            'krylov': 'cg',
            'maxiter': 3,
            'degree': 2,
            'weighting': 'local'
        })
        config['presmoother'] = ('block_gauss_seidel', {
            'sweep': 'symmetric',
            'iterations': 1
        })
        config['postsmoother'] = ('block_gauss_seidel', {
            'sweep': 'symmetric',
            'iterations': 1
        })
    else:
        config['smooth'] = ('energy', {
            'krylov': 'gmres',
            'maxiter': 3,
            'degree': 2,
            'weighting': 'local'
        })
        config['presmoother'] = ('gauss_seidel_nr', {
            'sweep': 'symmetric',
            'iterations': 2
        })
        config['postsmoother'] = ('gauss_seidel_nr', {
            'sweep': 'symmetric',
            'iterations': 2
        })

    # Determine near null-space modes B
    if B is None:
        # B is the constant for each variable in a node
        if isspmatrix_bsr(A) and A.blocksize[0] > 1:
            bsize = A.blocksize[0]
            config['B'] = np.kron(
                np.ones((int(A.shape[0] / bsize), 1), dtype=A.dtype),
                np.eye(bsize))
        else:
            config['B'] = np.ones((A.shape[0], 1), dtype=A.dtype)
    elif isinstance(B, (np.ndarray, np.matrix)):
        if len(B.shape) == 1:
            B = B.reshape(-1, 1)
        if (B.shape[0] != A.shape[0]) or (B.shape[1] == 0):
            raise TypeError('Invalid dimensions of B: B.shape[0] must '
                            'equal A.shape[0]')
        else:
            config['B'] = np.array(B, dtype=A.dtype)
    else:
        raise TypeError('Invalid B')

    if config['symmetry'] == 'hermitian':
        config['BH'] = None
    else:
        config['BH'] = config['B'].copy()

    # Set non-symmetry related parameters
    config['strength'] = ('evolution', {
        'k': 2,
        'proj_type': 'l2',
        'epsilon': 3.0
    })
    config['max_levels'] = 15
    config['max_coarse'] = 500
    config['coarse_solver'] = 'pinv'
    config['aggregate'] = 'standard'
    config['keep'] = False

    return config
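
A hedged follow-on sketch: the returned config holds plain dictionary entries, so individual parameters can be overridden before building a solver (the choices below are illustrative, not recommendations):

from pyamg import solver_configuration
from pyamg.gallery import poisson

A = poisson((40, 40), format='csr')
config = solver_configuration(A, verb=False)
config['strength'] = ('symmetric', {'theta': 0.0})   # cheaper SOC measure
config['max_coarse'] = 50                            # smaller coarsest grid
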
Example n. 31
def convert_mat_to_petsc(mat, comm=None):
    """Convert a matrix to the relevant PETSc type, currently
    only supports csr, bsr and dense matrices formats.

    Parameters
    ----------
    mat : dense, sparse, LinearOperator or Lazy matrix.
        The operator to convert.
    comm : mpi4py.MPI.Comm instance
        The mpi communicator.

    Returns
    -------
    pmat : petsc4py.PETSc.Mat
        The matrix in petsc form - only the local part if running
        across several mpi processes.
    """
    if isinstance(mat, sp.linalg.LinearOperator):
        return linear_operator_2_petsc_shell(mat, comm=comm)

    PETSc, comm = get_petsc(comm=comm)
    mpi_sz = comm.Get_size()
    pmat = PETSc.Mat()

    # retrieve before mat is possibly built into sliced matrix
    shape = mat.shape

    pmat.create(comm=comm)
    pmat.setSizes(shape)
    pmat.setFromOptions()
    pmat.setUp()
    ri, rf = pmat.getOwnershipRange()

    # treat the operator as pre-sliced only if this process owns the whole matrix
    sliced = (mpi_sz == 1)
    if isinstance(mat, qu.Lazy):
        # operator hasn't been constructed yet
        try:
            # try to lazily construct with slicing
            mat = mat(ownership=(ri, rf))
            sliced = True
        except TypeError:
            mat = mat()

    # Sparse compressed or block row matrix
    if sp.issparse(mat):
        mat.sort_indices()

        if sliced:
            csr = (mat.indptr, mat.indices, mat.data)
        else:
            csr = slice_sparse_matrix_to_components(mat, ri, rf)

        if sp.isspmatrix_csr(mat):
            pmat.createAIJ(size=shape, nnz=mat.nnz, csr=csr, comm=comm)
        elif sp.isspmatrix_bsr(mat):
            pmat.createBAIJ(size=shape, bsize=mat.blocksize,
                            nnz=mat.nnz, csr=csr, comm=comm)

    # Dense matrix
    else:
        if mpi_sz > 1 and not sliced:
            pmat.createDense(size=shape, array=mat[ri:rf, :], comm=comm)
        else:
            pmat.createDense(size=shape, array=mat, comm=comm)

    pmat.assemble()
    return pmat
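
A hedged usage sketch (assumes petsc4py and mpi4py are installed; with comm=None the function is assumed to fall back to a default communicator internally):

import scipy.sparse as sp

A = sp.random(100, 100, density=0.05, format='csr')
pA = convert_mat_to_petsc(A)
x = pA.createVecRight()
x.set(1.0)
y = pA.createVecLeft()
pA.mult(x, y)                             # y = A @ ones
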
Example n. 32
def standard_interpolation(A,
                           C,
                           splitting,
                           theta=None,
                           norm='min',
                           modified=True,
                           cost=[0]):
    """Create prolongator using standard interpolation

    Parameters
    ----------
    A : {csr_matrix}
        NxN matrix in CSR format
    C : {csr_matrix}
        Strength-of-Connection matrix
        Must have zero diagonal
    splitting : array
        C/F splitting stored in an array of length N
    theta : float in [0,1), default None
        theta value defining strong connections in a classical AMG sense.
        Provide if a different SOC is used for P than for the CF-splitting;
        otherwise, leave theta = None.
    norm : string, default 'min'
        Norm used in redefining classical SOC. Options are 'min' and 'abs' for CSR matrices,
        and 'min', 'abs', and 'fro' for BSR matrices. See strength.py for more information.
    modified : bool, default True
        Use modified classical interpolation. More robust if RS coarsening with second
        pass is not used for CF splitting. Ignores interpolating from strong F-connections
        without a common C-neighbor.

    Returns
    -------
    P : {csr_matrix}
        Prolongator using standard interpolation

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical import standard_interpolation
    >>> import numpy as np
    >>> A = poisson((5,),format='csr')
    >>> splitting = np.array([1,0,1,0,1], dtype='intc')
    >>> P = standard_interpolation(A, A, splitting)
    >>> print(P.todense())
    [[ 1.   0.   0. ]
     [ 0.5  0.5  0. ]
     [ 0.   1.   0. ]
     [ 0.   0.5  0.5]
     [ 0.   0.   1. ]]

    """
    if not isspmatrix_csr(C):
        raise TypeError('Expected csr_matrix SOC matrix, C.')

    nc = np.sum(splitting)
    n = A.shape[0]

    # Block BSR format: convert A to CSR, and expand the splitting and SOC
    # matrix so their DOFs correspond to the CSR version of A
    if isspmatrix_bsr(A):
        temp_A = A.tocsr()
        splitting0 = splitting * np.ones((A.blocksize[0], 1), dtype='intc')
        splitting0 = np.reshape(splitting0, (np.prod(splitting0.shape), ),
                                order='F')
        if theta is not None:
            C0 = classical_strength_of_connection(A,
                                                  theta=theta,
                                                  norm=norm,
                                                  cost=cost)
            C0 = UnAmal(C0, A.blocksize[0], A.blocksize[1])
        else:
            C0 = UnAmal(C, A.blocksize[0], A.blocksize[1])
        C0 = C0.tocsr()

        # Use modified standard interpolation by ignoring strong F-connections that do
        # not have a common C-point.
        if modified:
            amg_core.remove_strong_FF_connections(temp_A.shape[0], C0.indptr,
                                                  C0.indices, C0.data,
                                                  splitting)
        C0.eliminate_zeros()

        # Interpolation weights are computed based on entries in A, but subject to
        # the sparsity pattern of C.  So, copy the entries of A into the
        # sparsity pattern of C.
        C0.data[:] = 1.0
        C0 = C0.multiply(temp_A)

        P_indptr = np.empty_like(temp_A.indptr)
        amg_core.rs_standard_interpolation_pass1(temp_A.shape[0], C0.indptr,
                                                 C0.indices, splitting0,
                                                 P_indptr)
        nnz = P_indptr[-1]
        P_colinds = np.empty(nnz, dtype=P_indptr.dtype)
        P_data = np.empty(nnz, dtype=temp_A.dtype)

        if modified:
            amg_core.mod_standard_interpolation_pass2(
                temp_A.shape[0], temp_A.indptr, temp_A.indices, temp_A.data,
                C0.indptr, C0.indices, C0.data, splitting0, P_indptr,
                P_colinds, P_data)
        else:
            amg_core.rs_standard_interpolation_pass2(
                temp_A.shape[0], temp_A.indptr, temp_A.indices, temp_A.data,
                C0.indptr, C0.indices, C0.data, splitting0, P_indptr,
                P_colinds, P_data)

        nc = np.sum(splitting0)
        n = A.shape[0]
        P = csr_matrix((P_data, P_colinds, P_indptr), shape=[n, nc])
        return P.tobsr(blocksize=A.blocksize)

    # CSR format
    else:
        if theta is not None:
            C0 = classical_strength_of_connection(A,
                                                  theta=theta,
                                                  norm=norm,
                                                  cost=cost)
        else:
            C0 = C.copy()

        # Use modified standard interpolation by ignoring strong F-connections that do
        # not have a common C-point.
        if modified:
            amg_core.remove_strong_FF_connections(A.shape[0], C0.indptr,
                                                  C0.indices, C0.data,
                                                  splitting)
        C0.eliminate_zeros()

        # Interpolation weights are computed based on entries in A, but subject to
        # the sparsity pattern of C.  So, copy the entries of A into the
        # sparsity pattern of C.
        C0.data[:] = 1.0
        C0 = C0.multiply(A)

        P_indptr = np.empty_like(A.indptr)
        amg_core.rs_standard_interpolation_pass1(A.shape[0], C0.indptr,
                                                 C0.indices, splitting,
                                                 P_indptr)
        nnz = P_indptr[-1]
        P_colinds = np.empty(nnz, dtype=P_indptr.dtype)
        P_data = np.empty(nnz, dtype=A.dtype)

        if modified:
            amg_core.mod_standard_interpolation_pass2(
                A.shape[0], A.indptr, A.indices, A.data, C0.indptr, C0.indices,
                C0.data, splitting, P_indptr, P_colinds, P_data)
        else:
            amg_core.rs_standard_interpolation_pass2(
                A.shape[0], A.indptr, A.indices, A.data, C0.indptr, C0.indices,
                C0.data, splitting, P_indptr, P_colinds, P_data)
        nc = np.sum(splitting)
        n = A.shape[0]
        return csr_matrix((P_data, P_colinds, P_indptr), shape=[n, nc])
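
A hedged sketch of the theta override in standard_interpolation: when theta is given, the SOC is recomputed internally from A, so the C argument is effectively a placeholder in this call:

import numpy as np
from pyamg.gallery import poisson

A = poisson((5,), format='csr')
splitting = np.array([1, 0, 1, 0, 1], dtype='intc')
P = standard_interpolation(A, A, splitting, theta=0.25, norm='min')
print(P.toarray())
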
Example n. 33
def my_vis(ml, V, error=None, fname="", E2V=None, Pcols=None):
    """Coarse grid visualization for 2-D problems, for use with Paraview
       For all levels, outputs meshes, aggregates, near nullspace modes B, and selected
       prolongator basis functions.  Coarse level meshes are constructed by doing a
       Delaunay triangulation of interpolated fine grid vertices.

    Parameters
    ----------
    ml : {multilevel hierarchy}
        defines the multilevel hierarchy to visualize
    V : {array}
        coordinate array (N x D)
    error : {array}
        Fine grid error to plot (N x D)
    fname : {string}
        string to be appended to all output files, e.g. 'diffusion1'
    E2V : {array}
        Element index array (Nel x Nelnodes) for the finest level.  If None,
        then a Delaunay triangulation is done for the finest level.  All coarse
        levels use an internally calculated Delaunay triangulation
    Pcols : {list of tuples}
        Optional input list of tuples of the form [(lvl, [ints]), ...]
        where lvl is an integer defining the level on which to output
        the list of columns in [ints].

    Returns
    -------
        - Writes data to .vtu files for use in paraview (xml 0.1 format)
    """
    system('rm -f *.vtu')

    ##
    # For the purposes of clearer plotting, perturb vertices slightly
    V += rand(V.shape[0], V.shape[1]) * 1e-6

    ##
    # Create a list of vertices and meshes for all levels
    levels = ml.levels
    Vlist = [V]
    if E2V is None:
        [circ_cent, edges, E2V, tri_nbs] = delaunay.delaunay(V[:, 0], V[:, 1])
    E2Vlist = [E2V]

    mesh_type_list = []
    mesh_num_list = []
    if E2V.shape[1] == 1:
        mesh_type_list.append('vertex')
        mesh_num_list.append(1)
    if E2V.shape[1] == 3:
        mesh_type_list.append('tri')
        mesh_num_list.append(5)
    if E2V.shape[1] == 4:
        if V.shape[1] == 2:
            mesh_type_list.append('quad')
            mesh_num_list.append(9)

    if sparse.isspmatrix_bsr(levels[0].A):
        nPDEs = levels[0].A.blocksize[0]
    else:
        nPDEs = 1

    Agglist = []
    Agg = sparse.eye(levels[0].A.shape[0] // nPDEs,
                     levels[0].A.shape[1] // nPDEs,
                     format='csr')
    for i in range(1, len(levels)):
        ##
        # Interpolate the vertices to the next level by taking each
        # aggregate's center of gravity (i.e. average x and y value).
        Agg = Agg.tocsr() * levels[i - 1].AggOp.tocsr()
        Agg.data[:] = 1.0
        Agglist.append(Agg)

        AggX = scale_rows(Agg, Vlist[0][:, 0], copy=True)
        AggY = scale_rows(Agg, Vlist[0][:, 1], copy=True)
        AggX = ones((1, AggX.shape[0])) * AggX
        AggY = ones((1, AggY.shape[0])) * AggY
        Agg = Agg.tocsc()
        count = Agg.indptr[1:] - Agg.indptr[:-1]
        AggX = (ravel(AggX) / count).reshape(-1, 1)
        AggY = (ravel(AggY) / count).reshape(-1, 1)
        Vlist.append(hstack((AggX, AggY)))

        [circ_cent, edges, E2Vnew,
         tri_nbs] = delaunay.delaunay(Vlist[i][:, 0], Vlist[i][:, 1])
        E2Vlist.append(E2Vnew)
        mesh_type_list.append('tri')
        mesh_num_list.append(5)

    ##
    # On each level, output aggregates, B, the mesh
    for i in range(len(levels)):
        mesh_num = mesh_num_list[i]
        mesh_type = mesh_type_list[i]
        vertices = Vlist[i]
        elements = E2Vlist[i]
        # Print mesh
        write_basic_mesh(vertices, elements, mesh_type=mesh_type, \
                             fname=fname+"mesh_lvl"+str(i)+".vtu")
        # Visualize the aggregates
        if i != (len(levels) - 1):
            dg_vis(fname+"aggs_lvl"+str(i), Vlist[0], \
                    E2Vlist[0], Agglist[i], mesh_type)
        # Visualize B
        if sparse.isspmatrix_bsr(levels[i].A):
            nPDEs = levels[i].A.blocksize[0]
        else:
            nPDEs = 1
        cell_stuff = {mesh_num: elements}
        for j in range(nPDEs):
            indys = arange(j, levels[i].A.shape[0], nPDEs)
            write_vtu(Verts=vertices, Cells=cell_stuff, pdata=levels[i].B[indys,:], \
                          fname=fname+"B_variable"+str(j)+"_lvl"+str(i)+".vtu")

    ##
    # Output requested prolongator basis functions
    if Pcols is not None:
        for (lvl, cols) in Pcols:
            P = levels[lvl].P.tocsc()
            cell_stuff = {mesh_num_list[lvl]: E2Vlist[lvl]}
            for i in cols:
                Pcol = array(P[:, i].todense())
                write_vtu(Verts=Vlist[lvl],
                          Cells=cell_stuff,
                          pdata=Pcol,
                          fname=fname + "P_lvl" + str(lvl) + "col" + str(i) +
                          ".vtu")

    ##
    # Output the error on the finest level
    if error is not None:
        error = error.reshape(-1, 1)
        cell_stuff = {mesh_num_list[0]: E2Vlist[0]}
        if sparse.isspmatrix_bsr(levels[0].A):
            nPDEs = levels[0].A.blocksize[0]
        else:
            nPDEs = 1
        for j in range(nPDEs):
            indys = arange(j, levels[0].A.shape[0], nPDEs)
            write_vtu(Verts=Vlist[0], Cells=cell_stuff, pdata=error[indys,:], \
                      fname=fname+"error_variable"+str(j)+".vtu")
Example n. 34
def reference_distance_soc(A, V, theta=2.0, relative_drop=True):
    '''
    Reference routine for distance based strength of connection
    '''

    # deal with the supernode case
    if isspmatrix_bsr(A):
        dimen = int(A.shape[0]/A.blocksize[0])
        C = csr_matrix((np.ones((A.data.shape[0],)), A.indices, A.indptr),
                       shape=(dimen, dimen))
    else:
        A = A.tocsr()
        dimen = A.shape[0]
        C = A.copy()
        C.data = np.real(C.data)

    if V.shape[1] == 2:
        three_d = False
    elif V.shape[1] == 3:
        three_d = True
    else:
        raise ValueError('V must have 2 or 3 coordinates per point')

    for i in range(dimen):
        rowstart = C.indptr[i]
        rowend = C.indptr[i+1]
        pt_i = V[i, :]
        for j in range(rowstart, rowend):
            if C.indices[j] == i:
                # ignore the diagonal entry by making it large
                C.data[j] = np.finfo(np.float64).max
            else:
                # distance between entry j and i
                pt_j = V[C.indices[j], :]
                dist = (pt_i[0] - pt_j[0])**2
                dist += (pt_i[1] - pt_j[1])**2
                if three_d:
                    dist += (pt_i[2] - pt_j[2])**2
                C.data[j] = np.sqrt(dist)

        # apply drop tolerance
        this_row = C.data[rowstart:rowend]
        if relative_drop:
            tol_i = theta*this_row.min()
            this_row[this_row > tol_i] = 0.0
        else:
            this_row[this_row > theta] = 0.0

        C.data[rowstart:rowend] = this_row

    C.eliminate_zeros()
    C = C + 2.0*scipy.sparse.eye(C.shape[0], C.shape[1], format='csr')

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the distances.
    C.data = 1.0/C.data

    # Scale C by the largest magnitude entry in each row
    largest_row_entry = np.zeros((C.shape[0],), dtype=C.dtype)
    for i in range(C.shape[0]):
        for j in range(C.indptr[i], C.indptr[i+1]):
            val = abs(C.data[j])
            if val > largest_row_entry[i]:
                largest_row_entry[i] = val

    largest_row_entry[largest_row_entry != 0] =\
        1.0 / largest_row_entry[largest_row_entry != 0]
    C = C.tocsr()
    C = scale_rows(C, largest_row_entry, copy=True)

    return C
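
A hedged sketch for reference_distance_soc on a regular 2D grid, where only the nearest grid neighbors survive the relative drop tolerance theta=2.0:

import numpy as np
from pyamg.gallery import poisson

A = poisson((4, 4), format='csr')
xx, yy = np.meshgrid(np.arange(4.0), np.arange(4.0))
V = np.vstack((xx.ravel(), yy.ravel())).T
C = reference_distance_soc(A, V, theta=2.0)
print(C.nnz)
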
Example n. 35
def ParallelCR(M,f,dim,COMM):
    rank = COMM.Get_rank()      # processor rank
    numP = COMM.Get_size()      # num. processors

    assert sparse.isspmatrix_bsr(M)     # M must be BSR matrix
    dim = M.blocksize[0]                # blocks  have size [dim x dim]
    n = M.shape[0]//dim                 # M is [n x n] blocks
    h = (n+1)//numP                     # subsystem size on each processor
    k = int(np.log2(h))-1               # number of reduction levels
    
    assert power2check(n+1) 
    print_master('All good! M is ['+str(n)+' x '+str(n)+'] block matrix', COMM)      
    
    M.sort_indices()

    '''SPLIT SYSTEM ACROSS PROCESSORS'''
    if head(COMM):
        split_data = np.zeros((3*numP*h,dim,dim))    # 3(n+1) = 3(numP*h)
        split_data[2:-3,:,:] = M.data   # Mdata has length (3n - 2)
        send_data = split_data.ravel()
        
        split_f = np.zeros(h*numP*dim)
        split_f[:-dim] = f.ravel()
        send_f = split_f.ravel()
    else:
        send_data = None
        split_f = None
        
    recv_Mdata = np.empty(3*h*dim*dim)
    recv_y = np.empty(h*dim)
    
    COMM.Barrier()
    COMM.Scatter(send_data, recv_Mdata, root = 0)
    
    COMM.Barrier()
    COMM.Scatter(split_f, recv_y, root = 0)
    
    '''ASSEMBLE SUBSYSTEMS'''
    if head(COMM):
        Mi_data = recv_Mdata.reshape(3*h,dim,dim)[2:,:,:]
        temp = np.arange(1,3*h-1,dtype='int')
        indices = ((temp // 3) -1) + (temp % 3)
        indptr = np.arange(-1, 3*h+2, 3)
        indptr[0] = 0
        indptr[-1] = indptr[-1] - 1
        Mi = sparse.bsr_matrix((Mi_data,indices,indptr),blocksize=(dim,dim))
        yi = recv_y
    elif tail(COMM):
        temp = np.arange(2,3*h-1,dtype='int')
        indices = ((temp // 3) -1) + (temp % 3)
        indptr = np.arange(-2, 3*h, 3)
        indptr[0] = 0
        indptr[-1] = indptr[-1] - 1
        Mi_data = recv_Mdata.reshape(3*h,dim,dim)[:-3,:,:]
        Mi = sparse.bsr_matrix((Mi_data,indices,indptr),blocksize=(dim,dim))
        yi = np.zeros(h*dim)
        yi[dim:] = recv_y[:-dim]
    else:
        temp = np.arange(2,3*h+2,dtype='int')
        indices = ((temp // 3) -1) + (temp % 3)
        indptr = np.arange(-2, 3*h+3, 3)
        indptr[0] = 0
        indptr[-1] = indptr[-1] - 1
        Mi_data = recv_Mdata.reshape(3*h,dim,dim)
        Mi = sparse.bsr_matrix((Mi_data,indices,indptr),blocksize=(dim,dim))
        yi = np.zeros((h+1)*dim)
        yi[dim:] = recv_y

    '''REDUCE ALL SUBSYSTEMS'''
    xi = reduction_step(Mi,yi,k,COMM)   
    
    if head(COMM):
        sendX = xi.ravel()
        recvX = np.empty((1, sendX.shape[0]*numP), np.float64)
    elif tail(COMM):
        temp = np.zeros((xi.shape[0],1))
        temp[:-dim] = xi[dim:]
        sendX = temp.reshape((1,xi.shape[0]))
        recvX = None
    else: 
        sendX = xi[dim:].ravel()
        recvX = None


    COMM.Barrier()

    COMM.Gather(sendX,recvX,root=0)
    if head(COMM):
        retX = recvX.ravel()[:-dim]
    else:
        retX = None

    return retX 
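
A hedged usage sketch (launch with e.g. mpiexec -n 4). The helpers head, tail, reduction_step, print_master and power2check are assumed to be defined in the surrounding module, and M must be block tridiagonal with n+1 block rows a power of two, as the asserts above require:

from mpi4py import MPI
import numpy as np
import scipy.sparse as sparse

comm = MPI.COMM_WORLD
dim, n = 2, 31                            # n + 1 = 32 = 2**5 block rows
T = sparse.diags([2.0 * np.ones(n), -np.ones(n - 1), -np.ones(n - 1)], [0, -1, 1])
M = sparse.kron(T, np.eye(dim)).tobsr(blocksize=(dim, dim))
f = np.ones(n * dim) if comm.Get_rank() == 0 else None   # RHS only on root
x = ParallelCR(M, f, dim, comm)           # gathered solution on rank 0
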
Example n. 36
def neumann_AIR(A, splitting, theta=0.025, degree=1, post_theta=0, cost=[0]):
    """ Approximate ideal restriction using a truncated Neumann expansion for A_ff^{-1},
    where 
        R = [-Acf*D, I],   where
        D = \sum_{i=0}^degree Lff^i

    Parameters
    ----------
    A : {csr_matrix}
        NxN matrix in CSR format
    splitting : array
        C/F splitting stored in an array of length N
    theta : float : default 0.025
        Compute approximation to ideal restriction for C, where C has rows filtered
        with tolerance theta, that is for j s.t.
            |C_ij| <= theta * |C_ii|        --> C_ij = 0.
        Helps keep R sparse. 
    degree : int in [0,4] : default 1
        Degree of Neumann expansion. Only supported up to degree 4.

    Returns
    -------
    Approximate ideal restriction in CSR format.

    Notes
    -----
    BSR matrices are supported by expanding the block CF-splitting to a
    scalar splitting, as done below.
    """

    Cpts = np.array(np.where(splitting == 1)[0], dtype='int32')
    Fpts = np.array(np.where(splitting == 0)[0], dtype='int32')
    nf0 = len(Fpts)

    # Convert block CF-splitting into scalar CF-splitting so that we can access
    # submatrices of BSR matrix A
    if isspmatrix_bsr(A):
        bsize = A.blocksize[0]
        Cpts *= bsize
        Fpts *= bsize
        Cpts0 = Cpts
        Fpts0 = Fpts
        for i in range(1, bsize):
            Cpts = np.hstack([Cpts, Cpts0 + i])
            Fpts = np.hstack([Fpts, Fpts0 + i])
        Cpts.sort()
        Fpts.sort()

    nc = Cpts.shape[0]
    nf = Fpts.shape[0]
    n = A.shape[0]
    C = csr_matrix(A, copy=True)
    if theta > 0.0:
        filter_matrix_rows(C, theta, diagonal=True, lump=False)

    # Drop near-zero entries so they do not enter the sparsity pattern of R
    C.data[np.abs(C.data) < 1e-16] = 0
    C.eliminate_zeros()

    Acf = C[Cpts, :][:, Fpts]
    Lff = -C[Fpts, :][:, Fpts]
    if isspmatrix_bsr(A):
        bsize = A.blocksize[0]
        Lff = Lff.tobsr(blocksize=[bsize, bsize])
        D_data = np.empty((nf0, bsize, bsize))
        for i in range(0, nf0):
            offset = np.where(
                Lff.indices[Lff.indptr[i]:Lff.indptr[i + 1]] == i)[0][0]
            # Save (pseudo)inverse of diagonal block
            D_data[i] = -np.linalg.pinv(Lff.data[Lff.indptr[i] + offset])
            # Set diagonal block to zero in Lff
            Lff.data[Lff.indptr[i] + offset][:] = 0.0
        Dff_inv = bsr_matrix(
            (D_data, np.arange(0, nf0), np.arange(0, nf0 + 1)),
            blocksize=[bsize, bsize])
        Lff = Dff_inv * Lff
    else:
        pts = np.arange(0, nf)
        D_data = -1.0 / Lff.diagonal()
        Lff[pts, pts] = 0.0
        Lff.eliminate_zeros()
        Dff_inv = csr_matrix((D_data, np.arange(0, nf), np.arange(0, nf + 1)))
        Lff = Dff_inv * Lff

    # Form Neumann approximation to Aff^{-1}
    Z = eye(nf, format='csr')
    if degree >= 1:
        Z += Lff
    if degree >= 2:
        Z += Lff * Lff
    if degree >= 3:
        Z += Lff * Lff * Lff
    if degree == 4:
        Z += Lff * Lff * Lff * Lff
    if degree > 4:
        raise ValueError("Only sparsity degree 0-4 supported.")
    Z = Z * Dff_inv

    # Multiply Acf by approximation to Aff^{-1}
    Z = -Acf * Z

    if post_theta > 0.0:
        if not isspmatrix_csr(Z):
            Z = Z.tocsr()
        filter_matrix_rows(Z, post_theta, diagonal=False, lump=False)

    # Get sizes and permutation matrix from [F, C] block
    # ordering to natural matrix ordering.
    permute = eye(n, format='csr')
    permute.indices = np.concatenate((Fpts, Cpts))

    # Form R = [Z, I], reorder and return
    try:
        R = hstack([Z, eye(nc, format='csr')])
    except Exception:
        raise ValueError("Either zero C-points or zero F-points encountered.")
    if isspmatrix_bsr(A):
        R = bsr_matrix(R * permute, blocksize=[bsize, bsize])
    else:
        R = csr_matrix(R * permute)
    return R
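A short usage sketch, not from the source: it assumes pyamg's gallery Poisson problem and the Ruge-Stuben splitting routine pyamg.classical.split.RS to produce the required C/F splitting.

from pyamg.gallery import poisson
from pyamg.classical.split import RS

A = poisson((50,), format='csr')              # 1D Poisson matrix in CSR
splitting = RS(A)                             # length-N array of 0 (F) / 1 (C)
R = neumann_AIR(A, splitting, theta=0.025, degree=2)
print(R.shape)                                # (#C-points, N)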
Example n. 37
0
def adaptive_sa_solver(A, initial_candidates=None, symmetry='hermitian',
                       pdef=True, num_candidates=1, candidate_iters=5,
                       improvement_iters=0, epsilon=0.1,
                       max_levels=10, max_coarse=10, aggregate='standard',
                       prepostsmoother=('gauss_seidel',
                                        {'sweep': 'symmetric'}),
                       smooth=('jacobi', {}), strength='symmetric',
                       coarse_solver='pinv2',
                       eliminate_local=(False, {'Ca': 1.0}), keep=False,
                       **kwargs):
    """Create a multilevel solver using Adaptive Smoothed Aggregation (aSA).

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Square matrix in CSR or BSR format
    initial_candidates : None, n x m dense matrix
        If a matrix, then this forms the basis for the first m candidates.
        Also in this case, the initial setup stage is skipped, because this
        provides the first candidate(s).  If None, then a random initial guess
        and relaxation are used to inform the initial candidate.
    symmetry : string
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        Note that for the strictly real case, these two options are the same
        Note that this flag does not denote definiteness of the operator
    pdef : bool
        True or False, whether A is known to be positive definite.
    num_candidates : integer
        Number of near-nullspace candidates to generate
    candidate_iters : integer
        Number of smoothing passes/multigrid cycles used at each level of
        the adaptive setup phase
    improvement_iters : integer
        Number of times each candidate is improved
    epsilon : float
        Target convergence factor
    max_levels : integer
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : integer
        Maximum number of variables permitted on the coarse grid.
    prepostsmoother : {tuple, string}
        Pre- and post-smoother used in the adaptive method, given as a
        relaxation descriptor, e.g. ('gauss_seidel', {'sweep': 'symmetric'})
    strength : ['symmetric', 'classical', 'evolution', ('predefined', {'C': csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system.  See smoothed_aggregation_solver(...) documentation.
    aggregate : ['standard', 'lloyd', 'naive', ('predefined', {'AggOp': csr_matrix})]
        Method used to aggregate nodes.  See smoothed_aggregation_solver(...)
        documentation.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used used to smooth the tentative prolongator.  See
        smoothed_aggregation_solver(...) documentation
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.
    eliminate_local : tuple
        Length 2 tuple.  If the first entry is True, then eliminate candidates
        where they aren't needed locally, using the second entry of the tuple
        to contain arguments to local elimination routine.  Given the rigid
        sparse data structures, this doesn't help much, if at all, with
        complexity.  It's more of a diagnostic utility.
    keep: bool
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics.  For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Returns
    -------
    multilevel_solver : multilevel_solver
        Smoothed aggregation solver with adaptively generated candidates
    work : float
        The "work" required to generate the solver, i.e., the total cost of
        just relaxation, relative to the fine grid.  The relaxation method
        used is assumed to be symmetric Gauss-Seidel.

    Notes
    -----

    - Unlike the standard Smoothed Aggregation (SA) method, adaptive SA does
      not require knowledge of near-nullspace candidate vectors.  Instead, an
      adaptive procedure computes one or more candidates 'from scratch'.  This
      approach is useful when no candidates are known or the candidates have
      been invalidated due to changes to matrix A.

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.aggregation import adaptive_sa_solver
    >>> import numpy as np
    >>> A=stencil_grid([[-1,-1,-1],[-1,8.0,-1],[-1,-1,-1]], (31,31),format='csr')
    >>> [asa,work] = adaptive_sa_solver(A,num_candidates=1)
    >>> residuals=[]
    >>> x=asa.solve(b=np.ones((A.shape[0],)), x0=np.ones((A.shape[0],)), residuals=residuals)

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alpha SA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)

    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
        except BaseException:
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Track work in terms of relaxation
    work = np.zeros((1,))

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Develop initial candidate(s).  Note that any predefined aggregation is
    # preserved.
    if initial_candidates is None:
        B, aggregate, strength =\
            initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                                max_levels, max_coarse, aggregate,
                                prepostsmoother, smooth, strength, work)
        # Normalize B
        B = (1.0/norm(B, 'inf')) * B
        num_candidates -= 1
    else:
        # Otherwise, use predefined candidates
        B = initial_candidates
        num_candidates -= B.shape[1]
        # Generate Aggregation and Strength Operators (the brute force way)
        sa = smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                         presmoother=prepostsmoother,
                                         postsmoother=prepostsmoother,
                                         smooth=smooth, strength=strength,
                                         max_levels=max_levels,
                                         max_coarse=max_coarse,
                                         aggregate=aggregate,
                                         coarse_solver=coarse_solver,
                                         improve_candidates=None, keep=True,
                                         **kwargs)
        if len(sa.levels) > 1:
            # Set strength-of-connection and aggregation
            aggregate = [('predefined', {'AggOp': sa.levels[i].AggOp.tocsr()})
                         for i in range(len(sa.levels) - 1)]
            strength = [('predefined', {'C': sa.levels[i].C.tocsr()})
                        for i in range(len(sa.levels) - 1)]

    # Develop additional candidates
    for i in range(num_candidates):
        x = general_setup_stage(
            smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate,
                                        strength=strength,
                                        improve_candidates=None,
                                        keep=True, **kwargs),
            symmetry, candidate_iters, prepostsmoother, smooth,
            eliminate_local, coarse_solver, work)

        # Normalize x and add to candidate list
        x = x/norm(x, 'inf')
        if np.isinf(x[0]) or np.isnan(x[0]):
            raise ValueError('Adaptive candidate is all 0.')
        B = np.hstack((B, x.reshape(-1, 1)))

    # Improve candidates
    if B.shape[1] > 1 and improvement_iters > 0:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        for i in range(improvement_iters):
            for j in range(B.shape[1]):
                # Run a V-cycle built on everything except candidate j, while
                # using candidate j as the initial guess
                x0 = B[:, 0]
                B = B[:, 1:]
                sa_temp =\
                    smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                                presmoother=prepostsmoother,
                                                postsmoother=prepostsmoother,
                                                smooth=smooth,
                                                coarse_solver=coarse_solver,
                                                aggregate=aggregate,
                                                strength=strength,
                                                improve_candidates=None,
                                                keep=True, **kwargs)
                x = sa_temp.solve(b, x0=x0,
                                  tol=float(np.finfo(float).tiny),
                                  maxiter=candidate_iters, cycle='V')
                work[:] += 2 * sa_temp.operator_complexity() *\
                    sa_temp.levels[0].A.nnz * candidate_iters

                # Apply local elimination
                elim, elim_kwargs = unpack_arg(eliminate_local)
                if elim is True:
                    x = x/norm(x, 'inf')
                    eliminate_local_candidates(x, sa_temp.levels[0].AggOp, A,
                                               sa_temp.levels[0].T,
                                               **elim_kwargs)

                # Normalize x and add to candidate list
                x = x/norm(x, 'inf')
                if np.isinf(x[0]) or np.isnan(x[0]):
                    raise ValueError('Adaptive candidate is all 0.')
                B = np.hstack((B, x.reshape(-1, 1)))

    elif improvement_iters > 0:
        # Special case for improving a single candidate
        max_levels = len(aggregate) + 1
        max_coarse = 0
        for i in range(improvement_iters):
            B, aggregate, strength =\
                initial_setup_stage(A, symmetry, pdef, candidate_iters,
                                    epsilon, max_levels, max_coarse,
                                    aggregate, prepostsmoother, smooth,
                                    strength, work, initial_candidate=B)
            # Normalize B
            B = (1.0/norm(B, 'inf'))*B

    return [smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate, strength=strength,
                                        improve_candidates=None, keep=keep,
                                        **kwargs),
            work[0]/A.nnz]
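A brief usage sketch of the returned pair, following the docstring example: unpack [solver, work] and use the adaptively built hierarchy as a preconditioner.

from pyamg.gallery import stencil_grid

A = stencil_grid([[-1,-1,-1],[-1,8.0,-1],[-1,-1,-1]], (31,31), format='csr')
asa, work = adaptive_sa_solver(A, num_candidates=2)
M = asa.aspreconditioner(cycle='V')   # Krylov-ready preconditioner
print(work)                           # relaxation work relative to the fine grid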
Example n. 38
0
def smoothed_aggregation_helmholtz_solver(A,
                                          planewaves,
                                          use_constant=(True, {
                                              'last_level': 0
                                          }),
                                          symmetry='symmetric',
                                          strength='symmetric',
                                          aggregate='standard',
                                          smooth=('energy', {
                                              'krylov': 'gmres'
                                          }),
                                          presmoother=('gauss_seidel_nr', {
                                              'sweep': 'symmetric'
                                          }),
                                          postsmoother=('gauss_seidel_nr', {
                                              'sweep': 'symmetric'
                                          }),
                                          improve_candidates='default',
                                          max_levels=10,
                                          max_coarse=100,
                                          **kwargs):
    """
    Create a multilevel solver using Smoothed Aggregation (SA) for a 2D Helmholtz operator

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix in CSR or BSR format
    planewaves : { list }
        [pw_0, pw_1, ..., pw_n], where the k-th tuple pw_k is of the form (fn,
        args).  fn is a callable and args is a dictionary of arguments for fn.
        This k-th tuple is used to define any new planewaves (i.e., new coarse
        grid basis functions) to be appended to the existing B_k at that level. 
            The function fn must return vectors defined on the finest level,
        i.e., a collection of vector(s) of length A.shape[0].  These vectors
        are then restricted to the appropriate level, where they enrich the 
        coarse space.
            Instead of a tuple, None can be used to stipulate no introduction
        of planewaves at that level.  If len(planewaves) < max_levels, the 
        last entry is used to define coarser level planewaves.
    use_constant : {tuple}
        Tuple of the form (bool, {'last_level':int}).  The boolean denotes 
        whether to introduce the constant in B at level 0.  'last_level' denotes
        the final level to use the constant in B.  That is, if 'last_level' is 1,
        then the vector in B corresponding to the constant on level 0 is dropped 
        from B at level 2.
            This is important, because using constant based interpolation beyond
        the Nyquist rate will result in poor solver performance.
    symmetry : {string}
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        'nonsymmetric' i.e. nonsymmetric in a hermitian sense
        Note that for the strictly real case, symmetric and hermitian are the same
        Note that this flag does not denote definiteness of the operator.
    strength : ['symmetric', 'classical', 'evolution', ('predefined', {'C' : csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system.  Method-specific parameters may be passed in using a
        tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
        all nonzero entries of the matrix are considered strong.  
            See notes below for varying this parameter on a per level basis.  Also,
        see notes below for using a predefined strength matrix on each level.
    aggregate : ['standard', 'lloyd', 'naive', ('predefined', {'AggOp' : csr_matrix})]
        Method used to aggregate nodes.  See notes below for varying this
        parameter on a per level basis.  Also, see notes below for using a
        predefined aggregation on each level.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used to smooth the tentative prolongator.  Method-specific
        parameters may be passed in using a tuple, e.g.  smooth=
        ('jacobi',{'filter' : True }).  See notes below for varying this
        parameter on a per level basis.
    presmoother : {tuple, string, list} : default ('gauss_seidel_nr', {'sweep':'symmetric'})
        Defines the presmoother for the multilevel cycling.  See notes below
        for varying this parameter on a per level basis.
    postsmoother : {tuple, string, list}
        Same as presmoother, except defines the postsmoother.
    improve_candidates : {list} : default [('block_gauss_seidel', {'sweep':'symmetric'}), None]
        The ith entry defines the method used to improve the candidates B on
        level i.  If the list is shorter than max_levels, then the last entry
        will define the method for all levels lower.
            The list elements are relaxation descriptors of the form used for
        presmoother and postsmoother.  A value of None implies no action on B.
    max_levels : {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : {integer} : default 100
        Maximum number of variables permitted on the coarse grid.

    Other Parameters
    ----------------
    coarse_solver : ['splu','lu', ... ]
        Solver used at the coarsest level of the MG hierarchy 

    Returns
    -------
    ml : multilevel_solver
        Multigrid hierarchy of matrices and prolongation operators

    See Also
    --------
    multilevel_solver, smoothed_aggregation_solver

    Notes
    -----
    - The additional parameters are passed through as arguments to
      multilevel_solver.  Refer to pyamg.multilevel_solver for additional
      documentation.

    - The parameters smooth, strength, aggregate, presmoother, postsmoother can
      be varied on a per level basis.  For different methods on different
      levels, use a list as input so that the ith entry defines the method at
      the ith level.  If there are more levels in the hierarchy than list
      entries, the last entry will define the method for all levels lower.
      
      Examples are:
      smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
      presmoother=[('block_gauss_seidel', {'sweep':'symmetric'}), 'sor']
      aggregate=['standard', 'naive']
      strength=[('symmetric', {'theta':0.25}), ('symmetric',{'theta':0.08})]

    - Predefined strength of connection and aggregation schemes can be
      specified.  These options are best used together, but aggregation can be
      predefined while strength of connection is not.

      For predefined strength of connection, use a list consisting of tuples of
      the form ('predefined', {'C' : C0}), where C0 is a csr_matrix and each
      degree-of-freedom in C0 represents a supernode.  For instance to
      predefine a three-level hierarchy, use [('predefined', {'C' : C0}),
      ('predefined', {'C' : C1}) ].
      
      Similarly for predefined aggregation, use a list of tuples.  For instance
      to predefine a three-level hierarchy, use [('predefined', {'AggOp' :
      Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the dimensions of A,
      Agg0 and Agg1 are compatible, i.e.  Agg0.shape[1] == A.shape[0] and
      Agg1.shape[1] == Agg0.shape[0].  Each AggOp is a csr_matrix.

    Examples
    --------
    >>> from pyamg import smoothed_aggregation_helmholtz_solver
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse.linalg import cg
    >>> import numpy as np
    >>> A = poisson((100,100), format='csr')           # matrix
    >>> b = np.random.rand(A.shape[0])                 # random RHS
    >>> ml = smoothed_aggregation_solver(A)            # AMG solver
    >>> M = ml.aspreconditioner(cycle='V')             # preconditioner
    >>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M)   # solve with CG

    References
    ----------
    .. [1] L. N. Olson and J. B. Schroder. Smoothed Aggregation for Helmholtz
    Problems. Numerical Linear Algebra with Applications.  pp. 361--386.  17
    (2010).

    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        raise TypeError('argument A must have type csr_matrix or bsr_matrix')

    A = A.asfptype()

    if (symmetry != 'symmetric') and (symmetry != 'hermitian') and (
            symmetry != 'nonsymmetric'):
        raise ValueError(
            'expected \'symmetric\', \'nonsymmetric\' or \'hermitian\' for the symmetry parameter '
        )
    A.symmetry = symmetry

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    ##
    # Preprocess and extend planewaves to length max_levels
    planewaves = preprocess_planewaves(planewaves, max_levels)
    # Check that the user has defined functions for B at each level
    use_const, args = unpack_arg(use_constant)
    first_planewave_level = -1
    for pw in planewaves:
        first_planewave_level += 1
        if pw is not None:
            break
    ##
    if (not use_const) and (planewaves[0] is None):
        raise ValueError('No functions defined for B on the finest level; '
                         'either use_constant must be true, or planewaves '
                         'must be defined for level 0')
    elif use_const and (args['last_level'] < first_planewave_level - 1):
        raise ValueError('Some levels have no function(s) defined for B.  '
                         'Change use_constant and/or planewave arguments.')

    ##
    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    improve_candidates = levelize_smooth_or_improve_candidates(
        improve_candidates, max_levels)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    ##
    # Start first level
    levels = []
    levels.append(multilevel_solver.level())
    levels[-1].A = A  # matrix
    levels[-1].B = numpy.zeros(
        (A.shape[0], 0))  # place-holder for near-nullspace candidates

    zeros_0 = numpy.zeros((levels[0].A.shape[0], ), dtype=A.dtype)
    while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
        A = levels[0].A
        A_l = levels[-1].A
        zeros_l = numpy.zeros((levels[-1].A.shape[0], ), dtype=A.dtype)

        ##
        # Generate additions to n-th level candidates
        if planewaves[len(levels) - 1] is not None:
            fn, args = unpack_arg(planewaves[len(levels) - 1])
            Bcoarse2 = numpy.array(fn(**args))

            ##
            # As in alpha-SA, relax the candidates before restriction
            if improve_candidates[0] is not None:
                Bcoarse2 = relaxation_as_linear_operator(
                    improve_candidates[0], A, zeros_0) * Bcoarse2

            ##
            # Restrict Bcoarse2 to current level
            for i in range(len(levels) - 1):
                Bcoarse2 = levels[i].R * Bcoarse2
            # relax after restriction
            if improve_candidates[len(levels) - 1] is not None:
                Bcoarse2 = relaxation_as_linear_operator(
                    improve_candidates[len(levels) - 1], A_l,
                    zeros_l) * Bcoarse2
        else:
            Bcoarse2 = numpy.zeros((A_l.shape[0], 0), dtype=A.dtype)

        ##
        # Deal with the use of constant in interpolation
        use_const, args = unpack_arg(use_constant)
        if use_const and len(levels) == 1:
            # If level 0, and the constant is to be used in interpolation
            levels[0].B = numpy.hstack((numpy.ones((A.shape[0], 1),
                                                   dtype=A.dtype), Bcoarse2))
        elif use_const and args['last_level'] == len(levels) - 2:
            # If the previous level was the last level to use the constant, then remove the
            # coarse grid function based on the constant from B
            levels[-1].B = numpy.hstack((levels[-1].B[:, 1:], Bcoarse2))
        else:
            levels[-1].B = numpy.hstack((levels[-1].B, Bcoarse2))

        ##
        # Create and Append new level
        extend_hierarchy(levels,
                         strength,
                         aggregate,
                         smooth, [None for i in range(max_levels)],
                         keep=True)

    ml = multilevel_solver(levels, **kwargs)
    change_smoothers(ml, presmoother, postsmoother)
    return ml
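To make the planewaves argument concrete, a hypothetical sketch (my_planewaves and its parameter values are illustrative, not from the source): each entry is an (fn, args) tuple returning fine-level vectors, or None to add nothing at that level.

import numpy

def my_planewaves(n, omega):
    # Illustrative generator: two fine-level planewave candidates of length n
    # for wavenumber omega, returned as columns of an (n, 2) array.
    x = numpy.linspace(0.0, 1.0, n)
    return numpy.vstack((numpy.cos(omega*x), numpy.sin(omega*x))).T

# One entry per level; the last entry is reused for coarser levels.
planewaves = [(my_planewaves, {'n': 10000, 'omega': 10.0}), None]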
Example n. 39
0
def initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                        max_levels, max_coarse, aggregate, prepostsmoother,
                        smooth, strength, work, initial_candidate=None):
    """Compute aggregation and the first near-nullspace candidate following Algorithm 3 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (aSA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf

    """
    # Define relaxation routine
    def relax(A, x):
        fn, kwargs = unpack_arg(prepostsmoother)
        if fn == 'gauss_seidel':
            gauss_seidel(A, x, np.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(A))
        elif fn == 'richardson':
            polynomial(A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(A)])
        elif fn == 'gmres':
            x[:] = (gmres(A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # flag for skipping steps f-i in step 4
    skip_f_to_i = True

    # step 1
    A_l = A
    if initial_candidate is None:
        x = np.random.rand(A_l.shape[0], 1).astype(A_l.dtype)
        # The following type check matches the usual 'complex' type,
        # but also numpy data types such as 'complex64', 'complex128'
        # and 'complex256'.
        if A_l.dtype.name.startswith('complex'):
            x = x + 1.0j*np.random.rand(A_l.shape[0], 1)
    else:
        x = np.array(initial_candidate, dtype=A_l.dtype)

    # step 2
    relax(A_l, x)
    work[:] += A_l.nnz * candidate_iters*2

    # step 3
    # not advised to stop the iteration here: often the first relaxation pass
    # _is_ good, but the remaining passes are poor
    # if x_A_x/x_A_x_old < epsilon:
    #    # relaxation alone is sufficient
    #    print 'relaxation alone works: %g'%(x_A_x/x_A_x_old)
    #    return x, []

    # step 4
    As = [A]
    xs = [x]
    Ps = []
    AggOps = []
    StrengthOps = []

    while A.shape[0] > max_coarse and max_levels > 1:
        # The real check to break from the while loop is below

        # Begin constructing next level
        fn, kwargs = unpack_arg(strength[len(As)-1])  # step 4b
        if fn == 'symmetric':
            C_l = symmetric_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
        elif fn == 'classical':
            C_l = classical_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
            if isspmatrix_bsr(A_l):
                C_l = amalgamate(C_l, A_l.blocksize[0])
        elif (fn == 'ode') or (fn == 'evolution'):
            C_l = evolution_strength_of_connection(A_l,
                                                   np.ones(
                                                       (A_l.shape[0], 1),
                                                       dtype=A.dtype),
                                                   **kwargs)
        elif fn == 'predefined':
            C_l = kwargs['C'].tocsr()
        elif fn is None:
            C_l = A_l.tocsr()
        else:
            raise ValueError('unrecognized strength of connection method: %s' %
                             str(fn))

        # In SA, strength represents "distance", so we take magnitude of
        # complex values
        if C_l.dtype.name.startswith('complex'):
            C_l.data = np.abs(C_l.data)

        # Create a unified strength framework so that large values represent
        # strong connections and small values represent weak connections
        if (fn == 'ode') or (fn == 'evolution') or (fn == 'energy_based'):
            C_l.data = 1.0 / C_l.data

        # aggregation
        fn, kwargs = unpack_arg(aggregate[len(As) - 1])
        if fn == 'standard':
            AggOp = standard_aggregation(C_l, **kwargs)[0]
        elif fn == 'lloyd':
            AggOp = lloyd_aggregation(C_l, **kwargs)[0]
        elif fn == 'predefined':
            AggOp = kwargs['AggOp'].tocsr()
        else:
            raise ValueError('unrecognized aggregation method %s' % str(fn))

        T_l, x = fit_candidates(AggOp, x)  # step 4c

        fn, kwargs = unpack_arg(smooth[len(As)-1])  # step 4d
        if fn == 'jacobi':
            P_l = jacobi_prolongation_smoother(A_l, T_l, C_l, x, **kwargs)
        elif fn == 'richardson':
            P_l = richardson_prolongation_smoother(A_l, T_l, **kwargs)
        elif fn == 'energy':
            P_l = energy_prolongation_smoother(A_l, T_l, C_l, x, None,
                                               (False, {}), **kwargs)
        elif fn is None:
            P_l = T_l
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # R should reflect A's structure # step 4e
        if symmetry == 'symmetric':
            A_l = P_l.T.asformat(P_l.format) * A_l * P_l
        elif symmetry == 'hermitian':
            A_l = P_l.H.asformat(P_l.format) * A_l * P_l

        StrengthOps.append(C_l)
        AggOps.append(AggOp)
        Ps.append(P_l)
        As.append(A_l)

        # skip to step 5 as in step 4e
        if (A_l.shape[0] <= max_coarse) or (len(AggOps) + 1 >= max_levels):
            break

        if not skip_f_to_i:
            x_hat = x.copy()  # step 4g
            relax(A_l, x)  # step 4h
            work[:] += A_l.nnz*candidate_iters*2
            if pdef is True:
                x_A_x = np.dot(np.conjugate(x).T, A_l*x)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)
            else:
                # use A.H A inner-product
                Ax = A_l * x
                # Axhat = A_l * x_hat
                x_A_x = np.dot(np.conjugate(Ax).T, Ax)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)

            if err_ratio < epsilon:  # step 4i
                # print "sufficient convergence, skipping"
                skip_f_to_i = True
                if x_A_x == 0:
                    x = x_hat  # need to restore x
        else:
            # just carry out relaxation, don't check for convergence
            relax(A_l, x)  # step 4h
            work[:] += 2 * A_l.nnz * candidate_iters

        # store xs for diagnostic use and for use in step 5
        xs.append(x)

    # step 5
    # Extend coarse-level candidate to the finest level
    # --> note that we start with the x from the second coarsest level
    x = xs[-1]
    # make sure that xs[-1] has been relaxed by step 4h, i.e. relax(As[-2], x)
    for lev in range(len(Ps)-2, -1, -1):  # lev = coarsest ... finest-1
        P = Ps[lev]                     # interpolation: level lev+1 --> lev
        A = As[lev]                     # A on level lev
        x = P * x
        relax(A, x)
        work[:] += A.nnz*candidate_iters*2

    # Set predefined strength of connection and aggregation
    if len(AggOps) > 1:
        aggregate = [('predefined', {'AggOp': AggOps[i]})
                     for i in range(len(AggOps))]
        strength = [('predefined', {'C': StrengthOps[i]})
                    for i in range(len(StrengthOps))]

    return x, aggregate, strength  # first candidate
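For reference, the unpack_arg helper used throughout these routines splits an (fn, kwargs) descriptor such as ('jacobi', {'omega': 1.0}); a rough sketch of its assumed behavior:

def unpack_arg(v):
    # Split a (fn, kwargs) descriptor; a bare string or None is treated
    # as fn with empty kwargs.
    if isinstance(v, tuple):
        return v[0], v[1]
    return v, {}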
Example n. 40
0
def reference_distance_soc(A, V, theta=2.0, relative_drop=True):
    '''
    Reference routine for distance-based strength of connection.
    '''

    # deal with the supernode case
    if isspmatrix_bsr(A):
        dimen = int(A.shape[0] / A.blocksize[0])
        C = csr_matrix((np.ones((A.data.shape[0], )), A.indices, A.indptr),
                       shape=(dimen, dimen))
    else:
        A = A.tocsr()
        dimen = A.shape[0]
        C = A.copy()
        C.data = np.real(C.data)

    if V.shape[1] == 2:
        three_d = False
    elif V.shape[1] == 3:
        three_d = True
    else:
        raise ValueError('V must contain 2D or 3D coordinates')

    for i in range(dimen):
        rowstart = C.indptr[i]
        rowend = C.indptr[i + 1]
        pt_i = V[i, :]
        for j in range(rowstart, rowend):
            if C.indices[j] == i:
                # ignore the diagonal entry by making it large
                C.data[j] = np.finfo(float).max
            else:
                # distance between entry j and i
                pt_j = V[C.indices[j], :]
                dist = (pt_i[0] - pt_j[0])**2
                dist += (pt_i[1] - pt_j[1])**2
                if three_d:
                    dist += (pt_i[2] - pt_j[2])**2
                C.data[j] = np.sqrt(dist)

        # apply drop tolerance
        this_row = C.data[rowstart:rowend]
        if relative_drop:
            tol_i = theta * this_row.min()
            this_row[this_row > tol_i] = 0.0
        else:
            this_row[this_row > theta] = 0.0

        C.data[rowstart:rowend] = this_row

    C.eliminate_zeros()
    C = C + 2.0 * scipy.sparse.eye(C.shape[0], C.shape[1], format='csr')

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the distances.
    C.data = 1.0 / C.data

    # Scale C by the largest magnitude entry in each row
    largest_row_entry = np.zeros((C.shape[0], ), dtype=C.dtype)
    for i in range(C.shape[0]):
        for j in range(C.indptr[i], C.indptr[i + 1]):
            val = abs(C.data[j])
            if val > largest_row_entry[i]:
                largest_row_entry[i] = val

    largest_row_entry[largest_row_entry != 0] =\
        1.0 / largest_row_entry[largest_row_entry != 0]
    C = C.tocsr()
    C = scale_rows(C, largest_row_entry, copy=True)

    return C
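A short usage sketch, not from the source: it assumes a gallery Poisson matrix with one random 2D coordinate per unknown.

import numpy as np
from pyamg.gallery import poisson

A = poisson((10, 10), format='csr')
V = np.random.rand(A.shape[0], 2)       # one (x, y) coordinate per unknown
C = reference_distance_soc(A, V, theta=2.0)
print(C.nnz)                            # connections kept after the drop tolerance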
Example n. 41
0
def energy_prolongation_smoother(A, T, Atilde, B, Bf, Cpt_params,
                                 krylov='cg', maxiter=4, tol=1e-8,
                                 degree=1, weighting='local',
                                 prefilter={}, postfilter={}):
    """Minimize the energy of the coarse basis functions (columns of T).  Both
    root-node and non-root-node style prolongation smoothing is available, see
    Cpt_params description below.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N)
    Atilde : {csr_matrix}
        Strength of connection matrix
    B : {array}
        Near-nullspace modes for coarse grid.  Has shape (M,k) where
        k is the number of coarse candidate vectors.
    Bf : {array}
        Near-nullspace modes for fine grid.  Has shape (N,k) where
        k is the number of coarse candidate vectors.
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        standard SA prolongation smoothing is carried out.  If True, then
        root-node style prolongation smoothing is carried out.  The dict must
        contain the parameters (1) P_I: P_I.T is the injection matrix for the
        Cpts, (2) I_F: an identity matrix for only the F-points (i.e. I, but
        with zero rows and columns for C-points), and (3) I_C: the C-point
        analogue to I_F.  See Notes below for more information.
    krylov : {string}
        'cg' : for SPD systems.  Solve A T = 0 in a constraint space with CG
        'cgnr' : for nonsymmetric and/or indefinite systems.
                 Solve A T = 0 in a constraint space with CGNR
        'gmres' : for nonsymmetric and/or indefinite systems.
                 Solve A T = 0 in a constraint space with GMRES
    maxiter : integer
        Number of energy minimization steps to apply to the prolongator
    tol : {scalar}
        Minimization tolerance
    degree : {int}
        Generate sparsity pattern for P based on (Atilde^degree T)
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the
            diagonal preconditioning
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
            Avoids any potential under-damping due to inaccurate spectral
            radius estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A
        'diagonal': Use inverse of the diagonal of A
    prefilter : {dictionary} : Default {}
        Filters elements by row in the sparsity pattern for P to reduce
        operator and setup complexity. If None or an empty dictionary, no
        dropping in P is done.  If prefilter has key 'k', then the largest 'k'
        entries are kept in each row.  If prefilter has key 'theta', all
        entries such that
            P[i,j] < kwargs['theta']*max(abs(P[i,:]))
        are dropped.  If prefilter['k'] and prefilter['theta'] are both
        present, they are used in conjunction, with the union of their
        patterns used.
    postfilter : {dictionary} : Default {}
        Filters elements by row in the smoothed P to reduce operator
        complexity.  Only supported if using the rootnode_solver. If None or
        an empty dictionary, no dropping in P is done. If postfilter has key
        'k', then the largest 'k' entries are kept in each row.  If postfilter
        has key 'theta', all entries such that
            P[i,j] < kwargs['theta']*max(abs(P[i,:]))
        are dropped.  If postfilter['k'] and postfilter['theta'] are both
        present, they are used in conjunction, with the union of their
        patterns used.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator

    Notes
    -----
    Only 'diagonal' weighting is supported for the CGNR method, because
    we are working with A^* A and not A.

    When Cpt_params[0] == True, root-node style prolongation smoothing
    is used to minimize the energy of columns of T.  Essentially, an
    identity block is maintained in T, corresponding to injection from
    the coarse-grid to the fine-grid root-nodes.  See [2] for more details,
    and see util.utils.get_Cpt_params for the helper function to generate
    Cpt_params.

    If Cpt_params[0] == False, the energy of columns of T are still
    minimized, but without maintaining the identity block.

    Examples
    --------
    >>> from pyamg.aggregation import energy_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> print(T.todense())
    [[ 1.  0.]
     [ 1.  0.]
     [ 1.  0.]
     [ 0.  1.]
     [ 0.  1.]
     [ 0.  1.]]
    >>> A = poisson((6,),format='csr')
    >>> B = np.ones((2,1),dtype=float)
    >>> P = energy_prolongation_smoother(A,T,A,B, None, (False,{}))
    >>> print(P.todense())
    [[ 1.          0.        ]
     [ 1.          0.        ]
     [ 0.66666667  0.33333333]
     [ 0.33333333  0.66666667]
     [ 0.          1.        ]
     [ 0.          1.        ]]

    References
    ----------
    .. [1] Jan Mandel, Marian Brezina, and Petr Vanek
       "Energy Optimization of Algebraic Multigrid Bases"
       Computing 62, 205-228, 1999
       http://dx.doi.org/10.1007/s006070050022
    .. [2] Olson, L. and Schroder, J. and Tuminaro, R.,
       "A general interpolation strategy for algebraic
       multigrid using energy minimization", SIAM Journal
       on Scientific Computing (SISC), vol. 33, pp.
       966--991, 2011.
    """

    # Test Inputs
    if maxiter < 0:
        raise ValueError('maxiter must be >= 0')
    if tol > 1:
        raise ValueError('tol must be <= 1')

    if sparse.isspmatrix_csr(A):
        A = A.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(A):
        pass
    else:
        raise TypeError("A must be csr_matrix or bsr_matrix")

    if sparse.isspmatrix_csr(T):
        T = T.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(T):
        pass
    else:
        raise TypeError("T must be csr_matrix or bsr_matrix")

    if T.blocksize[0] != A.blocksize[0]:
        raise ValueError("T row-blocksize should be the same as A blocksize")

    if B.shape[0] != T.shape[1]:
        raise ValueError("B holds the candidates for the coarse grid; "
                         "num_rows(B) must equal num_cols(T)")

    if min(T.nnz, A.nnz) == 0:
        return T
        
    if Atilde is not None and not sparse.isspmatrix_csr(Atilde):
        raise TypeError("Atilde must be csr_matrix")

    if ('theta' in prefilter) and (prefilter['theta'] == 0):
        prefilter.pop('theta', None)

    if ('theta' in postfilter) and (postfilter['theta'] == 0):
        postfilter.pop('theta', None)

    # Preprocess Atilde, the strength matrix
    if Atilde is None:
        Atilde = sparse.csr_matrix((np.ones(len(A.indices)),
                                    A.indices.copy(), A.indptr.copy()),
                                   shape=(A.shape[0]//A.blocksize[0],
                                          A.shape[1]//A.blocksize[1]))
    
    # If Atilde has no nonzeros, then return T
    if Atilde.nnz == 0:
        return T

    # Expand allowed sparsity pattern for P through multiplication by Atilde
    if degree > 0:

        # Construct Sparsity_Pattern by multiplying with Atilde
        T.sort_indices()
        shape = (int(T.shape[0]/T.blocksize[0]), int(T.shape[1]/T.blocksize[1]))
        Sparsity_Pattern = sparse.csr_matrix((np.ones(T.indices.shape),
                                              T.indices, T.indptr),
                                              shape=shape)
        
        AtildeCopy = Atilde.copy()
        AtildeCopy.data[:] = 1.0
        for i in range(degree):
            Sparsity_Pattern = AtildeCopy*Sparsity_Pattern

        # Optional filtering of sparsity pattern before smoothing
        if 'theta' in prefilter and 'k' in prefilter:
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern, prefilter['theta'])
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
            # Union two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
        elif 'k' in prefilter:
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
        elif 'theta' in prefilter:
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern, prefilter['theta'])
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        # UnAmal returns a BSR matrix with 1's in the nonzero locations
        Sparsity_Pattern = UnAmal(Sparsity_Pattern,
                                  T.blocksize[0], T.blocksize[1])
        Sparsity_Pattern.sort_indices()

    else:
        # If degree is 0, just copy T for the sparsity pattern
        Sparsity_Pattern = T.copy()
        if 'theta' in prefilter and 'k' in prefilter:
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern, prefilter['theta'])
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
            # Union two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
        elif 'k' in prefilter:
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
        elif 'theta' in prefilter:
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern, prefilter['theta'])
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        Sparsity_Pattern.data[:] = 1.0
        Sparsity_Pattern.sort_indices()

    # If using root nodes, enforce identity at C-points
    if Cpt_params[0]:
        Sparsity_Pattern = Cpt_params[1]['I_F'] * Sparsity_Pattern
        Sparsity_Pattern = Cpt_params[1]['P_I'] + Sparsity_Pattern

    # Construct array of inv(Bi'Bi), where Bi is B restricted to row i's
    # sparsity pattern in Sparsity Pattern. This array is used multiple times
    # in Satisfy_Constraints(...).
    BtBinv = compute_BtBinv(B, Sparsity_Pattern)

    # If using root nodes and B has more columns than A's blocksize, then
    # T must be updated so that T*B = Bfine.  Note, if this is a 'secondpass'
    # after dropping entries in P, then we must re-enforce the constraints
    if (Cpt_params[0] and (B.shape[1] > A.blocksize[0])) or ('secondpass' in postfilter):
        T = filter_operator(T, Sparsity_Pattern, B, Bf, BtBinv)
        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']

    # Iteratively minimize the energy of T subject to the constraints of
    # Sparsity_Pattern and maintaining T's effect on B, i.e. T*B =
    # (T+Update)*B, i.e. Update*B = 0
    if krylov == 'cg':
        T = cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                      maxiter, tol, weighting, Cpt_params)
    elif krylov == 'cgnr':
        T = cgnr_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                        maxiter, tol, weighting, Cpt_params)
    elif krylov == 'gmres':
        T = gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                         maxiter, tol, weighting, Cpt_params)

    T.eliminate_zeros()

    # Filter entries in P, only in the rootnode case, i.e., Cpt_params[0] == True
    if (len(postfilter) == 0) or ('secondpass' in postfilter) or (Cpt_params[0] is False):
        return T
    else:
        if 'theta' in postfilter and 'k' in postfilter:
            T_theta = filter_matrix_rows(T, postfilter['theta'])
            T_k = truncate_rows(T, postfilter['k'])

            # Union two sparsity patterns
            T_theta.data[:] = 1.0
            T_k.data[:] = 1.0
            T_filter = T_theta + T_k
            T_filter.data[:] = 1.0
            T_filter = T.multiply(T_filter)

        elif 'k' in postfilter:
            T_filter = truncate_rows(T, postfilter['k'])
        elif 'theta' in postfilter:
            T_filter = filter_matrix_rows(T, postfilter['theta'])
        else:
            raise ValueError("Unrecognized postfilter option")

        # Re-smooth T_filter and re-fit the modes B into the span. 
        # Note, we set 'secondpass', because this is the second 
        # filtering pass
        T = energy_prolongation_smoother(A, T_filter,
                                         Atilde, B, Bf, Cpt_params,
                                         krylov=krylov, maxiter=1,
                                         tol=1e-8, degree=0,
                                         weighting=weighting,
                                         prefilter={},
                                         postfilter={'secondpass' : True} )

    return T
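Building on the docstring example, a hedged sketch of the prefilter option (the value k=2 is illustrative): truncate each row of the candidate sparsity pattern to its two largest entries before energy minimization.

import numpy as np
from scipy.sparse import coo_matrix
from pyamg.gallery import poisson

data = np.ones((6,))
row = np.arange(0, 6)
col = np.kron([0, 1], np.ones((3,)))
T = coo_matrix((data, (row, col)), shape=(6, 2)).tocsr()
A = poisson((6,), format='csr')
B = np.ones((2, 1), dtype=float)
# prefilter={'k': 2} keeps the 2 largest entries per row of the pattern.
P = energy_prolongation_smoother(A, T, A, B, None, (False, {}),
                                 prefilter={'k': 2})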
Example n. 42
0
def smoothed_aggregation_solver(A, B=None, BH=None,
                                symmetry='hermitian', strength='symmetric',
                                aggregate='standard',
                                smooth=('jacobi', {'omega': 4.0/3.0}),
                                presmoother=('block_gauss_seidel',
                                             {'sweep': 'symmetric'}),
                                postsmoother=('block_gauss_seidel',
                                              {'sweep': 'symmetric'}),
                                improve_candidates=[('block_gauss_seidel',
                                                    {'sweep': 'symmetric',
                                                     'iterations': 4}),
                                                    None],
                                max_levels=10, max_coarse=10,
                                diagonal_dominance=False,
                                keep=False, **kwargs):
    """
    Create a multilevel solver using classical-style Smoothed Aggregation (SA)

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix in CSR or BSR format
    B : {None, array_like}
        Right near-nullspace candidates stored in the columns of an NxK array.
        The default value B=None is equivalent to B=ones((N,1))
    BH : {None, array_like}
        Left near-nullspace candidates stored in the columns of an NxK array.
        BH is only used if symmetry is 'nonsymmetric'.
        The default value BH=None is equivalent to BH=B.copy()
    symmetry : {string}
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        'nonsymmetric' i.e. nonsymmetric in a hermitian sense
        Note, in the strictly real case, symmetric and hermitian are the same
        Note, this flag does not denote definiteness of the operator.
    strength : ['symmetric', 'classical', 'evolution', 'algebraic_distance',
               'affinity', ('predefined', {'C' : csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system.  Method-specific parameters may be passed in using a
        tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
        all nonzero entries of the matrix are considered strong.
        See notes below for varying this parameter on a per level basis.  Also,
        see notes below for using a predefined strength matrix on each level.
    aggregate : ['standard', 'lloyd', 'naive',
                ('predefined', {'AggOp' : csr_matrix})]
        Method used to aggregate nodes.  See notes below for varying this
        parameter on a per level basis.  Also, see notes below for using a
        predefined aggregation on each level.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used to smooth the tentative prolongator.  Method-specific
        parameters may be passed in using a tuple, e.g.  smooth=
        ('jacobi',{'filter' : True }).  See notes below for varying this
        parameter on a per level basis.
    presmoother : {tuple, string, list} : default ('block_gauss_seidel',
                  {'sweep':'symmetric'})
        Defines the presmoother for the multilevel cycling.  The default block
        Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
        is CSR or is a BSR matrix with blocksize of 1.  See notes below for
        varying this parameter on a per level basis.
    postsmoother : {tuple, string, list}
        Same as presmoother, except defines the postsmoother.
    improve_candidates : {tuple, string, list} : default
                        [('block_gauss_seidel',
                         {'sweep': 'symmetric', 'iterations': 4}), None]
        The ith entry defines the method used to improve the candidates B on
        level i.  If the list is shorter than max_levels, then the last entry
        will define the method for all levels lower.  If tuple or string, then
        this single relaxation descriptor defines improve_candidates on all
        levels.
        The list elements are relaxation descriptors of the form used for
        presmoother and postsmoother.  A value of None implies no action on B.
    max_levels : {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : {integer} : default 10
        Maximum number of variables permitted on the coarse grid.
    diagonal_dominance : {bool, tuple} : default False
        If True (or the first tuple entry is True), then avoid coarsening
        diagonally dominant rows.  The second tuple entry requires a
        dictionary, where the key value 'theta' is used to tune the diagonal
        dominance threshold.
    keep : {bool} : default False
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics.  For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Other Parameters
    ----------------
    cycle_type : ['V','W','F']
        Structure of multigrid cycle
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
            Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.
    setup_complexity : bool
        For a detailed, more accurate setup complexity, pass in
        setup_complexity=True.  This slows down performance, but increases
        the accuracy of the complexity count.

    Returns
    -------
    ml : multilevel_solver
        Multigrid hierarchy of matrices and prolongation operators

    See Also
    --------
    multilevel_solver, classical.ruge_stuben_solver,
    aggregation.smoothed_aggregation_solver

    Notes
    -----
        - This method implements classical-style SA, not root-node style SA
          (see aggregation.rootnode_solver).

        - The additional parameters are passed through as arguments to
          multilevel_solver.  Refer to pyamg.multilevel_solver for additional
          documentation.

        - At each level, four steps are executed in order to define the coarser
          level operator.

          1. Matrix A is given and used to derive a strength matrix, C.

          2. Based on the strength matrix, indices are grouped or aggregated.

          3. The aggregates define coarse nodes and a tentative prolongation
             operator T is defined by injection

          4. The tentative prolongation operator is smoothed by a relaxation
             scheme to improve the quality and extent of interpolation from the
             aggregates to fine nodes.

        - The parameters smooth, strength, aggregate, presmoother, postsmoother
          can be varied on a per level basis.  For different methods on
          different levels, use a list as input so that the i-th entry defines
          the method at the i-th level.  If there are more levels in the
          hierarchy than list entries, the last entry will define the method
          for all coarser levels.

          Examples are:
          smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
          presmoother=[('block_gauss_seidel', {'sweep':'symmetric'}), 'sor']
          aggregate=['standard', 'naive']
          strength=[('symmetric', {'theta':0.25}), ('symmetric',
                                                    {'theta':0.08})]

        - Predefined strength of connection and aggregation schemes can be
          specified.  These options are best used together, but aggregation can
          be predefined while strength of connection is not.

          For predefined strength of connection, use a list consisting of
          tuples of the form ('predefined', {'C' : C0}), where C0 is a
          csr_matrix and each degree-of-freedom in C0 represents a supernode.
          For instance to predefine a three-level hierarchy, use
          [('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].

          Similarly for predefined aggregation, use a list of tuples.  For
          instance to predefine a three-level hierarchy, use [('predefined',
          {'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
          dimensions of A, Agg0 and Agg1 are compatible, i.e.  Agg0.shape[1] ==
          A.shape[0] and Agg1.shape[1] == Agg0.shape[0].  Each AggOp is a
          csr_matrix.

    Examples
    --------
    >>> from pyamg import smoothed_aggregation_solver
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse.linalg import cg
    >>> import numpy as np
    >>> A = poisson((100,100), format='csr')           # matrix
    >>> b = np.ones((A.shape[0]))                      # RHS
    >>> ml = smoothed_aggregation_solver(A)            # AMG solver
    >>> M = ml.aspreconditioner(cycle='V')             # preconditioner
    >>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M)   # solve with CG

    References
    ----------
    .. [1] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    """

    if 'setup_complexity' in kwargs:
        if kwargs['setup_complexity'] is True:
            mat_mat_complexity.__detailed__ = True
        del kwargs['setup_complexity']

    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR",
                 SparseEfficiencyWarning)
        except Exception:
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()

    if symmetry not in ('symmetric', 'hermitian', 'nonsymmetric'):
        raise ValueError('expected \'symmetric\', \'nonsymmetric\' or '
                         '\'hermitian\' for the symmetry parameter')
    A.symmetry = symmetry

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Right near nullspace candidates use constant for each variable as default
    if B is None:
        B = np.kron(np.ones((int(A.shape[0]/blocksize(A)), 1), dtype=A.dtype),
                    np.eye(blocksize(A)))
    else:
        B = np.asarray(B, dtype=A.dtype)
        if len(B.shape) == 1:
            B = B.reshape(-1, 1)
        if B.shape[0] != A.shape[0]:
            raise ValueError('The near null-space modes B have incorrect '
                             'dimensions for matrix A')
        if B.shape[1] < blocksize(A):
            warn('Having fewer target vectors, B.shape[1], than the '
                 'blocksize of A can degrade convergence factors.')

    # Left near nullspace candidates
    if A.symmetry == 'nonsymmetric':
        if BH is None:
            BH = B.copy()
        else:
            BH = np.asarray(BH, dtype=A.dtype)
            if len(BH.shape) == 1:
                BH = BH.reshape(-1, 1)
            if BH.shape[1] != B.shape[1]:
                raise ValueError('The number of left and right near '
                                 'null-space modes B and BH must be equal')
            if BH.shape[0] != A.shape[0]:
                raise ValueError('The near null-space modes BH have '
                                 'incorrect dimensions for matrix A')

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    improve_candidates =\
        levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Construct multilevel structure
    levels = []
    levels.append(multilevel_solver.level())
    levels[-1].A = A          # matrix

    # Append near nullspace candidates
    levels[-1].B = B          # right candidates
    if A.symmetry == 'nonsymmetric':
        levels[-1].BH = BH    # left candidates

    while len(levels) < max_levels and\
            int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:
        extend_hierarchy(levels, strength, aggregate, smooth,
                         improve_candidates, diagonal_dominance, keep)

    # Construct and return multilevel hierarchy
    ml = multilevel_solver(levels, **kwargs)
    change_smoothers(ml, presmoother, postsmoother)
    return ml
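A usage sketch of the per-level parameter lists described in the Notes above; the option values are illustrative only, and only pyamg plus its poisson gallery are assumed.

import numpy as np
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson

A = poisson((50, 50), format='csr')
ml = smoothed_aggregation_solver(
    A,
    strength=[('symmetric', {'theta': 0.25}),    # level 0
              ('symmetric', {'theta': 0.08})],   # all coarser levels
    aggregate='standard',
    smooth=('jacobi', {'omega': 4.0 / 3.0}),
    presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),
    postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),
    max_levels=10, max_coarse=500)
b = np.ones(A.shape[0])
x = ml.solve(b, tol=1e-8)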
Example n. 43
def make_system(A, x, b, formats=None):
    """
    Return A,x,b suitable for relaxation or raise an exception
    
    Parameters
    ----------
    A : {sparse-matrix}
        n x n system
    x : {array}
        n-vector, initial guess
    b : {array}
        n-vector, right-hand side
    formats : {'csr', 'csc', 'bsr', 'lil', 'dok', ...}
        desired sparse matrix format
        default is no change to A's format

    Returns
    -------
    (A,x,b), where A is in the desired sparse-matrix format
    and x and b are "raveled", i.e. (n,) vectors.

    Notes
    -----
    Does some rudimentary error checking on the system,
    such as checking for compatible dimensions and checking
    for compatible type, i.e. float or complex.

    Examples
    --------
    >>> from pyamg.relaxation.relaxation import make_system 
    >>> from pyamg.gallery import poisson
    >>> import numpy
    >>> A = poisson((10,10), format='csr')
    >>> x = numpy.zeros((A.shape[0],1))
    >>> b = numpy.ones((A.shape[0],1))
    >>> (A,x,b) = make_system(A,x,b,formats=['csc'])
    >>> print(x.shape)
    (100,)
    >>> print(b.shape)
    (100,)
    >>> print(A.format)
    csc
    """

    if formats is None:
        pass
    elif formats == ['csr']:
        if sparse.isspmatrix_csr(A):
            pass
        elif sparse.isspmatrix_bsr(A):
            A = A.tocsr()
        else:
            warn('implicit conversion to CSR', sparse.SparseEfficiencyWarning)
            A = sparse.csr_matrix(A)
    else:
        if sparse.isspmatrix(A) and A.format in formats:
            pass
        else:
            A = sparse.csr_matrix(A).asformat(formats[0])

    if not isinstance(x, numpy.ndarray):
        raise ValueError('expected numpy array for argument x')
    if not isinstance(b, numpy.ndarray):
        raise ValueError('expected numpy array for argument b')

    M,N = A.shape

    if M != N:
        raise ValueError('expected square matrix')

    if x.shape not in [(M,), (M,1)]:
        raise ValueError('x has invalid dimensions')
    if b.shape not in [(M,), (M,1)]:
        raise ValueError('b has invalid dimensions')

    if A.dtype != x.dtype or A.dtype != b.dtype:
        raise TypeError('arguments A, x, and b must have the same dtype')
    
    if not x.flags.carray:
        raise ValueError('x must be contiguous in memory')

    x = numpy.ravel(x)
    b = numpy.ravel(b)

    return A,x,b
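For context on how make_system is consumed, here is a hedged sketch of a damped-Jacobi sweep built on top of it; the wrapper simple_jacobi is hypothetical, and only make_system plus numpy are assumed.

import numpy as np
from pyamg.relaxation.relaxation import make_system
from pyamg.gallery import poisson

def simple_jacobi(A, x, b, omega=2.0 / 3.0, iterations=1):
    # Normalize the system: A as CSR, x and b as contiguous (n,) vectors
    A, x, b = make_system(A, x, b, formats=['csr'])
    D = A.diagonal()
    Dinv = np.where(D != 0, 1.0 / D, 0.0)
    for _ in range(iterations):
        x += omega * Dinv * (b - A @ x)   # x <- x + omega D^{-1} (b - A x)
    return x

A = poisson((10, 10), format='csr')
x = simple_jacobi(A, np.zeros(A.shape[0]), np.ones(A.shape[0]), iterations=5)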
Example n. 44
def jacobi_prolongation_smoother(S,
                                 T,
                                 C,
                                 B,
                                 omega=4.0 / 3.0,
                                 degree=1,
                                 filter=False,
                                 weighting='diagonal',
                                 cost=[0.0]):
    """Jacobi prolongation smoother

    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    C : {csr_matrix, bsr_matrix}
        Strength-of-connection matrix
    B : {array}
        Near nullspace modes for the coarse grid such that T*B
        exactly reproduces the fine grid near nullspace modes
    omega : {scalar}
        Damping parameter
    filter : {boolean}
        If true, filter S before smoothing T.  This option can significantly
        reduce operator complexity.
    weighting : {string}
        'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
          Avoids any potential under-damping due to inaccurate spectral radius
          estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A
        'diagonal': Classic Jacobi D = diagonal(A)

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
        where K = diag(S)^-1 * S and rho(K) is an approximation to the
        spectral radius of K.

    Notes
    -----
    If weighting is not 'local', then results using Jacobi prolongation
    smoother are not precisely reproducible due to a random initial guess used
    for the spectral radius approximation.  For precise reproducibility,
    set numpy.random.seed(..) to the same value before each test.

    Examples
    --------
    >>> from pyamg.aggregation import jacobi_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = jacobi_prolongation_smoother(A,T,A,np.ones((2,1)))
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    # preprocess weighting
    if weighting == 'block':
        if sparse.isspmatrix_csr(S):
            weighting = 'diagonal'
        elif sparse.isspmatrix_bsr(S):
            if S.blocksize[0] == 1:
                weighting = 'diagonal'

    if filter:
        # Implement filtered prolongation smoothing for the general case by
        # utilizing satisfy constraints

        if sparse.isspmatrix_bsr(S):
            numPDEs = S.blocksize[0]
        else:
            numPDEs = 1

        # Create a filtered S with entries dropped that aren't in C
        C = UnAmal(C, numPDEs, numPDEs)
        S = S.multiply(C)
        S.eliminate_zeros()
        cost[0] += 1.0

    if weighting == 'diagonal':
        # Use diagonal of S
        D_inv = get_diagonal(S, inv=True)
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
        # 15 WU to find spectral radius, 2 to scale D_inv_S twice
        cost[0] += 17
    elif weighting == 'block':
        # Use block diagonal of S
        D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
        D_inv = sparse.bsr_matrix(
            (D_inv, np.arange(D_inv.shape[0]), np.arange(D_inv.shape[0] + 1)),
            shape=S.shape)
        D_inv_S = D_inv * S
        # 15 WU to find spectral radius, 2 to scale D_inv_S twice
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
        cost[0] += 17
    elif weighting == 'local':
        # Use the Gershgorin estimate as each row's weight, instead of a global
        # spectral radius estimate
        D = np.abs(S) * np.ones((S.shape[0], 1), dtype=S.dtype)
        D_inv = np.zeros_like(D)
        D_inv[D != 0] = 1.0 / np.abs(D[D != 0])

        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = omega * D_inv_S
        cost[0] += 3
    else:
        raise ValueError('Incorrect weighting option')

    if filter:
        # Carry out Jacobi, but after calculating the prolongator update, U,
        # apply satisfy constraints so that U*B = 0
        P = T
        for i in range(degree):
            if sparse.isspmatrix_bsr(P):
                U = (D_inv_S * P).tobsr(blocksize=P.blocksize)
            else:
                U = D_inv_S * P

            cost[0] += P.nnz / float(S.nnz)

            # (1) Enforce U*B = 0. Construct array of inv(Bi'Bi), where Bi is B
            # restricted to row i's sparsity pattern in Sparsity Pattern. This
            # array is used multiple times in Satisfy_Constraints(...).
            temp_cost = [0.0]
            BtBinv = compute_BtBinv(B, U, cost=temp_cost)
            cost[0] += temp_cost[0] / float(S.nnz)

            # (2) Apply satisfy constraints
            temp_cost = [0.0]
            Satisfy_Constraints(U, B, BtBinv, cost=temp_cost)
            cost[0] += temp_cost[0] / float(S.nnz)

            # Update P
            P = P - U
            cost[0] += max(P.nnz, U.nnz) / float(S.nnz)
    else:
        # Carry out Jacobi as normal
        P = T
        for i in range(degree):
            P = P - (D_inv_S * P)
            cost[0] += P.nnz / float(S.nnz)

    return P
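The formula P = (I - (omega/rho(K)) K) T with K = D^{-1} S can be checked directly for the diagonal weighting.  A minimal sketch using scipy/numpy only; the spectral radius is computed densely here purely for illustration, so the entries differ slightly from the doctest above, which uses pyamg's approximate estimate.

import numpy as np
import scipy.sparse as sparse
from pyamg.gallery import poisson

S = poisson((6,), format='csr')
T = sparse.csr_matrix((np.ones(6), (np.arange(6), np.repeat([0, 1], 3))),
                      shape=(6, 2))
K = sparse.diags(1.0 / S.diagonal()) @ S              # K = D^{-1} S
rho = np.max(np.abs(np.linalg.eigvals(K.toarray())))  # exact rho(K), small n
P = (sparse.eye(6) - (4.0 / 3.0 / rho) * K) @ T
print(P.toarray())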
Example n. 45
def classical_strength_of_connection(A, theta=0.0):
    """
    Return a strength of connection matrix using the classical AMG measure
    An off-diagonal entry A[i,j] is a strong connection iff::

            | A[i,j] | >= theta * max(| A[i,k] |), where k != i

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format
    theta : float
        Threshold parameter in [0,1].

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    symmetric_strength_of_connection : symmetric measure used in SA
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
    - A symmetric A does not necessarily yield a symmetric strength matrix S
    - Calls C++ function classical_strength_of_connection
    - The version as implemented is designed for M-matrices.  Trottenberg et
      al. use max A[i,k] over all negative entries, which is equivalent for
      M-matrices.  A positive edge weight never indicates a strong connection.

    References
    ----------

    .. [1] Briggs, W. L., Henson, V. E., McCormick, S. F., "A multigrid
       tutorial", Second edition. Society for Industrial and Applied
       Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
       ISBN: 0-89871-462-1

    .. [2] Trottenberg, U., Oosterlee, C. W., Schuller, A., "Multigrid",
       Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
       ISBN: 0-12-701070-X

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import classical_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = classical_strength_of_connection(A, 0.0)

    """

    if sparse.isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
    else:
        blocksize = 1

    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)

    if (theta < 0 or theta > 1):
        raise ValueError('expected theta in [0,1]')

    Sp = np.empty_like(A.indptr)
    Sj = np.empty_like(A.indices)
    Sx = np.empty_like(A.data)

    fn = amg_core.classical_strength_of_connection
    fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
    S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)

    if blocksize > 1:
        S = amalgamate(S, blocksize)

    # Strength represents "distance", so take the magnitude
    S.data = np.abs(S.data)

    # Scale S by the largest magnitude entry in each row
    S = scale_rows_by_largest_entry(S)

    return S
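The threshold test above is easy to sanity-check densely on a tiny matrix.  A hedged sketch using numpy only; this dense loop is a reference for the inequality, not pyamg's C++ kernel.

import numpy as np

A = np.array([[ 4.0, -1.0, -0.1],
              [-1.0,  4.0, -1.0],
              [-0.1, -1.0,  4.0]])
theta = 0.25
strong = np.zeros_like(A, dtype=bool)
for i in range(A.shape[0]):
    cutoff = theta * np.abs(np.delete(A[i], i)).max()  # theta * max |A[i,k]|
    for j in range(A.shape[0]):
        if j != i and abs(A[i, j]) >= cutoff:
            strong[i, j] = True                        # j strongly influences i
print(strong)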
Example n. 46
def jacobi_prolongation_smoother(S, T, C, B, omega=4.0/3.0, degree=1, filter=False, weighting='diagonal'):
    """Jacobi prolongation smoother
   
    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    C : {csr_matrix, bsr_matrix}
        Strength-of-connection matrix
    B : {array}
        Near nullspace modes for the coarse grid such that T*B 
        exactly reproduces the fine grid near nullspace modes
    omega : {scalar}
        Damping parameter
    filter : {boolean}
        If true, filter S before smoothing T.  This option can significantly
        reduce operator complexity.
    weighting : {string}
        'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
          Avoids any potential under-damping due to inaccurate spectral radius
          estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A  
        'diagonal': Classic Jacobi D = diagonal(A)

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
        where K = diag(S)^-1 * S and rho(K) is an approximation to the 
        spectral radius of K.

    Notes
    -----
    If weighting is not 'local', then results using Jacobi prolongation
    smoother are not precisely reproducible due to a random initial guess used
    for the spectral radius approximation.  For precise reproducibility, 
    set numpy.random.seed(..) to the same value before each test. 
    
    Examples
    --------
    >>> from pyamg.aggregation import jacobi_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy
    >>> data = numpy.ones((6,))
    >>> row = numpy.arange(0,6)
    >>> col = numpy.kron([0,1],numpy.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = jacobi_prolongation_smoother(A,T,A,numpy.ones((2,1)))
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    # preprocess weighting
    if weighting == 'block':
        if isspmatrix_csr(S):
            weighting = 'diagonal'
        elif isspmatrix_bsr(S):
            if S.blocksize[0] == 1:
                weighting = 'diagonal'
    
    if filter:
        ##
        # Implement filtered prolongation smoothing for the general case by
        # utilizing satisfy constraints

        if isspmatrix_bsr(S):
            numPDEs = S.blocksize[0]
        else:
            numPDEs = 1

        # Create a filtered S with entries dropped that aren't in C
        C = UnAmal(C, numPDEs, numPDEs)
        S = S.multiply(C)
        S.eliminate_zeros()

    if weighting == 'diagonal':
        # Use diagonal of S
        D_inv = get_diagonal(S, inv=True)
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
    elif weighting == 'block':
        # Use block diagonal of S
        D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
        D_inv = bsr_matrix((D_inv, numpy.arange(D_inv.shape[0]),
                            numpy.arange(D_inv.shape[0]+1)), shape=S.shape)
        D_inv_S = D_inv*S
        D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
    elif weighting == 'local':
        # Use the Gershgorin estimate as each row's weight, instead of a global
        # spectral radius estimate
        D = numpy.abs(S)*numpy.ones((S.shape[0],1), dtype=S.dtype)
        D_inv = numpy.zeros_like(D)
        D_inv[D != 0] = 1.0 / numpy.abs(D[D != 0])

        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = omega*D_inv_S
    else:
        raise ValueError('Incorrect weighting option')

    
    if filter: 
        ##
        # Carry out Jacobi, but after calculating the prolongator update, U,
        # apply satisfy constraints so that U*B = 0
        P = T
        for i in range(degree):
            if isspmatrix_bsr(P):
                U = (D_inv_S*P).tobsr(blocksize=P.blocksize)
            else:
                U = D_inv_S*P
            
            ##
            # Enforce U*B = 0 
            # (1) Construct array of inv(Bi'Bi), where Bi is B restricted to row
            # i's sparsity pattern in Sparsity Pattern. This array is used
            # multiple times in Satisfy_Constraints(...).
            BtBinv = compute_BtBinv(B, U)
            # (2) Apply satisfy constraints
            Satisfy_Constraints(U, B, BtBinv)
            
            ##
            # Update P
            P = P - U

    else:
        ##
        # Carry out Jacobi as normal
        P = T
        for i in range(degree):
            P = P - (D_inv_S*P)

    return P
Example n. 47
def symmetric_strength_of_connection(A, theta=0, cost=[0]):
    """
    Compute strength of connection matrix using the standard symmetric measure

    An off-diagonal connection A[i,j] is strong iff::

        abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )

    Parameters
    ----------
    A : csr_matrix
        Matrix graph defined in sparse format.  Entry A[i,j] describes the
        strength of edge [i,j]
    theta : float
        Threshold parameter (positive).

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    classical_strength_of_connection : classical measure used in Ruge-Stuben AMG
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
        - For vector problems, standard strength measures may produce
          undesirable aggregates.  A "block approach" from Vanek et al. is used
          to replace vertex comparisons with block-type comparisons.  A
          connection between nodes i and j in the block case is strong if::

          ||AB[i,j]|| >= theta * sqrt( ||AB[i,i]|| * ||AB[j,j]|| )

          where AB[k,l] is the matrix block (degrees of freedom) associated
          with nodes k and l and ||.|| is a matrix norm, such as the
          Frobenius norm.

    References
    ----------
    .. [1] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import symmetric_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = symmetric_strength_of_connection(A, 0.0)
    """

    if theta < 0:
        raise ValueError('expected a positive theta')

    if sparse.isspmatrix_csr(A):

        S_rowptr = np.empty_like(A.indptr)
        S_colinds = np.empty_like(A.indices)
        S_data = np.empty_like(A.data)

        fn = amg_core.symmetric_strength_of_connection
        fn(A.shape[0], theta, A.indptr, A.indices, A.data, S_rowptr, S_colinds,
           S_data)
        # Assume takes ~0.5 pass to find diagonals, 1 pass to filter
        cost[0] += 1.5

        S = sparse.csr_matrix((S_data, S_colinds, S_rowptr), shape=A.shape)

    elif sparse.isspmatrix_bsr(A):
        M, N = A.shape
        R, C = A.blocksize

        if R != C:
            raise ValueError('matrix must have square blocks')

        if theta == 0:
            data = np.ones(len(A.indices), dtype=A.dtype)
            S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
                                  shape=(int(M / R), int(N / C)))
        else:
            # the strength of connection matrix is based on the
            # Frobenius norms of the blocks
            data = (np.conjugate(A.data) * A.data).reshape(-1,
                                                           R * C).sum(axis=1)
            cost[0] += 1
            A = sparse.csr_matrix((data, A.indices, A.indptr),
                                  shape=(int(M / R), int(N / C)))
            return symmetric_strength_of_connection(A, theta, cost)
    else:
        raise TypeError('expected csr_matrix or bsr_matrix')

    # Strength represents "distance", so take the magnitude
    S.data = np.abs(S.data)

    # Scale S by the largest magnitude entry in each row
    S = scale_rows_by_largest_entry(S)

    # One pass to find largest entry, 1 pass to scale all elements
    # by it and adjust signs
    cost[0] += 2 * float(S.nnz) / A.nnz

    return S
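The symmetric measure admits an equally small dense reference; a vectorized numpy sketch, illustrative only.

import numpy as np

A = np.array([[ 4.0, -1.0, -0.1],
              [-1.0,  4.0, -1.0],
              [-0.1, -1.0,  4.0]])
theta = 0.1
d = np.abs(np.diag(A))
strong = np.abs(A) >= theta * np.sqrt(np.outer(d, d))  # |a_ij| >= theta*sqrt(|a_ii||a_jj|)
np.fill_diagonal(strong, False)                        # off-diagonal connections only
print(strong)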
Example n. 48
def solver_configuration(A, B=None, verb=True):
    """
    Given an arbitrary matrix A, generate a dictionary of parameters with
    which to generate a smoothed_aggregation_solver.

    Parameters
    ----------
    A : {array, matrix, csr_matrix, bsr_matrix}
        (n x n) matrix to invert, CSR or BSR format preferred for efficiency
    B : {None, array}
        Near null-space modes used to construct the smoothed aggregation solver
        If None, the constant vector is used
        If (n x m) array, then B is passed to smoothed_aggregation_solver
    verb : {bool}
        If True, print verbose output during runtime

    Returns
    -------
    config : {dict}
        A dictionary of solver configuration parameters that one uses to
        generate a smoothed aggregation solver

    Notes
    -----
    The config dictionary contains the following parameter entries:
        symmetry, smooth, presmoother, postsmoother, B, strength,
        max_levels, max_coarse, coarse_solver, aggregate, keep
    See smoothed_aggregation_solver for each parameter's description.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import solver_configuration
    >>> A = poisson((40,40),format='csr')
    >>> solver_config = solver_configuration(A,verb=False)
    """

    # Ensure acceptable format of A
    A = make_csr(A)
    config = {}

    # Detect symmetry
    if ishermitian(A, fast_check=True):
        config['symmetry'] = 'hermitian'
        if verb:
            print("  Detected a Hermitian matrix")
    else:
        config['symmetry'] = 'nonsymmetric'
        if verb:
            print("  Detected a non-Hermitian matrix")

    # Symmetry dependent parameters
    if config['symmetry'] == 'hermitian':
        config['smooth'] = ('energy', {'krylov': 'cg', 'maxiter': 3,
                            'degree': 2, 'weighting': 'local'})
        config['presmoother'] = ('block_gauss_seidel',
                                 {'sweep': 'symmetric', 'iterations': 1})
        config['postsmoother'] = ('block_gauss_seidel',
                                  {'sweep': 'symmetric', 'iterations': 1})
    else:
        config['smooth'] = ('energy', {'krylov': 'gmres', 'maxiter': 3,
                            'degree': 2, 'weighting': 'local'})
        config['presmoother'] = ('gauss_seidel_nr',
                                 {'sweep': 'symmetric', 'iterations': 2})
        config['postsmoother'] = ('gauss_seidel_nr',
                                  {'sweep': 'symmetric', 'iterations': 2})

    # Determine near null-space modes B
    if B is None:
        # B is the constant for each variable in a node
        if isspmatrix_bsr(A) and A.blocksize[0] > 1:
            bsize = A.blocksize[0]
            config['B'] = np.kron(np.ones((int(A.shape[0] / bsize), 1),
                                  dtype=A.dtype), np.eye(bsize))
        else:
            config['B'] = np.ones((A.shape[0], 1), dtype=A.dtype)
    elif isinstance(B, (np.ndarray, np.matrix)):
        if len(B.shape) == 1:
            B = B.reshape(-1, 1)
        if (B.shape[0] != A.shape[0]) or (B.shape[1] == 0):
            raise TypeError('Invalid dimensions of B, B.shape[0] must equal '
                            'A.shape[0]')
        else:
            config['B'] = np.array(B, dtype=A.dtype)
    else:
        raise TypeError('Invalid B')

    if config['symmetry'] == 'hermitian':
        config['BH'] = None
    else:
        config['BH'] = config['B'].copy()

    # Set non-symmetry related parameters
    config['strength'] = ('evolution', {'k': 2, 'proj_type': 'l2',
                          'epsilon': 3.0})
    config['max_levels'] = 15
    config['max_coarse'] = 500
    config['coarse_solver'] = 'pinv'
    config['aggregate'] = 'standard'
    config['keep'] = False

    return config
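A hedged usage sketch of consuming the generated config; this assumes pyamg's blackbox helpers (solver alongside solver_configuration).  If only smoothed_aggregation_solver is available, the dictionary entries can instead be passed as keyword arguments.

import numpy as np
from pyamg.gallery import poisson
from pyamg.blackbox import solver_configuration, solver

A = poisson((40, 40), format='csr')
config = solver_configuration(A, verb=False)
ml = solver(A, config)          # builds a smoothed_aggregation_solver
b = np.ones(A.shape[0])
x = ml.solve(b, tol=1e-8)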
Example n. 49
def energy_based_strength_of_connection(A, theta=0.0, k=2, cost=[0]):
    """
    Compute a strength of connection matrix using an energy-based measure.

    Parameters
    ----------
    A : {sparse-matrix}
        matrix from which to generate strength of connection information
    theta : {float}
        Threshold parameter in [0,1]
    k : {int}
        Number of relaxation steps used to generate strength information

    Returns
    -------
    S : {csr_matrix}
        Matrix graph defining strong connections.  The sparsity pattern
        of S matches that of A.  For BSR matrices, S is a reduced strength
        of connection matrix that describes connections between supernodes.

    Notes
    -----
    This method relaxes with weighted-Jacobi in order to approximate the
    matrix inverse.  A normalized change of energy is then used to define
    point-wise strength of connection values.  Specifically, let v be the
    approximation to the i-th column of the inverse, then

    (S_ij)^2 = <v_j, v_j>_A / <v, v>_A,

    where v_j = v, such that entry j in v has been zeroed out.  As is common,
    larger values imply a stronger connection.

    Current implementation is a very slow pure-python implementation for
    experimental purposes, only.

    References
    ----------
    .. [1] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
       "An Energy-Based AMG Coarsening Strategy",
       Numerical Linear Algebra with Applications,
       vol. 13, pp. 133-148, 2006.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import energy_based_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = energy_based_strength_of_connection(A, 0.0)
    """

    if theta < 0:
        raise ValueError('expected a positive theta')
    if not sparse.isspmatrix(A):
        raise ValueError('expected sparse matrix')
    if not isinstance(k, int):
        raise ValueError('expected an integer number of steps')
    if k < 0:
        raise ValueError('expected a nonnegative number of steps')

    if sparse.isspmatrix_bsr(A):
        bsr_flag = True
        numPDEs = A.blocksize[0]
        if A.blocksize[0] != A.blocksize[1]:
            raise ValueError('expected square blocks in BSR matrix A')
    else:
        bsr_flag = False

    # Convert A to csc and Atilde to csr
    if sparse.isspmatrix_csr(A):
        Atilde = A.copy()
        A = A.tocsc()
    else:
        A = A.tocsc()
        Atilde = A.copy()
        Atilde = Atilde.tocsr()

    # Calculate the weighted-Jacobi parameter
    from pyamg.util.linalg import approximate_spectral_radius
    D = A.diagonal()
    Dinv = 1.0 / D
    Dinv[D == 0] = 0.0
    Dinv = sparse.csc_matrix(
        (Dinv, (np.arange(A.shape[0]), np.arange(A.shape[1]))), shape=A.shape)
    DinvA = Dinv * A
    omega = 1.0 / approximate_spectral_radius(DinvA)
    del DinvA

    # Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
    S = sparse.csc_matrix(A.shape, dtype=A.dtype)  # empty matrix
    I = sparse.eye(A.shape[0], A.shape[1], format='csc')
    for i in range(k + 1):
        S = S + omega * (Dinv * (I - A * S))

    # Calculate the strength entries in S column-wise, but only strength
    # values at the sparsity pattern of A
    for i in range(Atilde.shape[0]):
        v = np.mat(S[:, i].todense())
        Av = np.mat(A * v)
        denom = np.sqrt(np.conjugate(v).T * Av)
        # replace entries in row i with strength values
        for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
            col = Atilde.indices[j]
            vj = v[col].copy()
            v[col] = 0.0
            #   =  (||v_j||_A - ||v||_A) / ||v||_A
            val = np.sqrt(np.conjugate(v).T * A * v) / denom - 1.0

            # Negative values generally imply a weak connection
            if val > -0.01:
                Atilde.data[j] = abs(val)
            else:
                Atilde.data[j] = 0.0

            v[col] = vj

    # Apply drop tolerance
    Atilde = classical_strength_of_connection_abs(Atilde, theta=theta)
    Atilde.eliminate_zeros()

    # Put ones on the diagonal
    Atilde = Atilde + I.tocsr()
    Atilde.sort_indices()

    # Amalgamate Atilde for the BSR case, using ones for all strong connections
    if bsr_flag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
        nblocks = Atilde.indices.shape[0]
        uone = np.ones((nblocks, ))
        Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
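The inverse-approximation loop above is plain weighted Jacobi applied to A*S = I.  A dense numpy check on a tiny system, purely illustrative and not the pyamg kernel:

import numpy as np

A = np.array([[4.0, -1.0], [-1.0, 4.0]])
Dinv = np.diag(1.0 / np.diag(A))
omega = 1.0 / np.max(np.abs(np.linalg.eigvals(Dinv @ A)))
S = np.zeros_like(A)
for _ in range(25):
    S = S + omega * Dinv @ (np.eye(2) - A @ S)   # S <- S + omega D^{-1}(I - A S)
print(np.allclose(S, np.linalg.inv(A), atol=1e-6))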
Example n. 50
def my_vis(ml, V, error=None, fname="", E2V=None, Pcols=None):
    """Coarse grid visualization for 2-D problems, for use with Paraview
       For all levels, outputs meshes, aggregates, near nullspace modes B, and selected
       prolongator basis functions.  Coarse level meshes are constructed by doing a
       Delaunay triangulation of interpolated fine grid vertices.

    Parameters
    ----------
    ml : {multilevel hierarchy}
        defines the multilevel hierarchy to visualize
    V : {array}
        coordinate array (N x D)
    error : {array}
        Fine grid error to plot (N x D)
    fname : {string}
        string to be appended to all output files, e.g. 'diffusion1'
    E2V : {array}
        Element index array (Nel x Nelnodes) for the finest level.  If None,
        then a Delaunay triangulation is done for the finest level.  All coarse
        levels use an internally calculated Delaunay triangulation
    Pcols : {list of tuples}
        Optional input list of tuples of the form [(lvl, [ints]), ...]
        where lvl is an integer defining the level on which to output
        the list of columns in [ints].

    Returns
    -------
        - Writes data to .vtu files for use in Paraview (xml 0.1 format)

    """
    system('rm -f *.vtu')

    ##
    # For the purposes of clearer plotting, perturb vertices slightly
    V += rand(V.shape[0], V.shape[1])*1e-6

    ## 
    # Create a list of vertices and meshes for all levels
    levels = ml.levels
    Vlist = [V]
    if E2V is None:
        [circ_cent,edges,E2V,tri_nbs]=delaunay.delaunay(V[:,0], V[:,1])
    E2Vlist = [E2V]

    mesh_type_list = []
    mesh_num_list = []
    if E2V.shape[1] == 1:
        mesh_type_list.append('vertex')
        mesh_num_list.append(1)
    if E2V.shape[1] == 3:
        mesh_type_list.append('tri')
        mesh_num_list.append(5)
    if E2V.shape[1] == 4:
        if V.shape[1] == 2:
            mesh_type_list.append('quad')
            mesh_num_list.append(9)
    
    if sparse.isspmatrix_bsr(levels[0].A):
        nPDEs = levels[0].A.blocksize[0]
    else:
        nPDEs = 1
    
    Agglist = []
    Agg = sparse.eye(int(levels[0].A.shape[0]/nPDEs),
                     int(levels[0].A.shape[1]/nPDEs), format='csr')
    for i in range(1,len(levels)):
        ##
        # Interpolate the vertices to the next level by taking each
        # aggregate's center of gravity (i.e. average x and y value).
        Agg = Agg.tocsr()*levels[i-1].AggOp.tocsr()
        Agg.data[:] = 1.0
        Agglist.append(Agg)
            
        AggX = scale_rows(Agg, Vlist[0][:,0], copy=True) 
        AggY = scale_rows(Agg, Vlist[0][:,1], copy=True) 
        AggX = ones((1, AggX.shape[0]))*AggX
        AggY = ones((1, AggY.shape[0]))*AggY
        Agg = Agg.tocsc()
        count = Agg.indptr[1:]-Agg.indptr[:-1]
        AggX = (ravel(AggX)/count).reshape(-1,1)
        AggY = (ravel(AggY)/count).reshape(-1,1)
        Vlist.append(hstack((AggX, AggY)))

        [circ_cent,edges,E2Vnew,tri_nbs]=delaunay.delaunay(Vlist[i][:,0], Vlist[i][:,1])
        E2Vlist.append(E2Vnew)
        mesh_type_list.append('tri')
        mesh_num_list.append(5)

        
    ##
    # On each level, output aggregates, B, the mesh
    for i in range(len(levels)):
        mesh_num = mesh_num_list[i]
        mesh_type = mesh_type_list[i]
        vertices = Vlist[i]
        elements = E2Vlist[i]
        # Print mesh
        write_basic_mesh(vertices, elements, mesh_type=mesh_type, \
                             fname=fname+"mesh_lvl"+str(i)+".vtu")    
        # Visualize the aggregates
        if i != (len(levels)-1):
            dg_vis(fname+"aggs_lvl"+str(i), Vlist[0], \
                    E2Vlist[0], Agglist[i], mesh_type)
        # Visualize B
        if sparse.isspmatrix_bsr(levels[i].A):
            nPDEs = levels[i].A.blocksize[0]
        else:
            nPDEs = 1
        cell_stuff = {mesh_num : elements}
        for j in range(nPDEs):
            indys = arange(j,levels[i].A.shape[0],nPDEs)
            write_vtu(Verts=vertices, Cells=cell_stuff, pdata=levels[i].B[indys,:], \
                          fname=fname+"B_variable"+str(j)+"_lvl"+str(i)+".vtu")


    ##
    # Output requested prolongator basis functions
    if Pcols is not None:
        for (lvl,cols) in Pcols:
            P = levels[lvl].P.tocsc()
            cell_stuff = {mesh_num_list[lvl] : E2Vlist[lvl]}
            for i in cols:           
                Pcol = array(P[:,i].todense())
                write_vtu(Verts=Vlist[lvl], Cells=cell_stuff, pdata=Pcol, 
                          fname=fname+"P_lvl"+str(lvl)+"col"+str(i)+".vtu")
    
    ##
    # Output the error on the finest level
    if error is not None:
        error = error.reshape(-1,1)
        cell_stuff = {mesh_num_list[0] : E2Vlist[0]}
        if sparse.isspmatrix_bsr(levels[0].A):
            nPDEs = levels[0].A.blocksize[0]
        else:
            nPDEs = 1
        for j in range(nPDEs):
            indys = arange(j, levels[0].A.shape[0], nPDEs)
            write_vtu(Verts=Vlist[0], Cells=cell_stuff, pdata=error[indys,:], \
                      fname=fname+"error_variable"+str(j)+".vtu")
Example n. 52
def classical_strength_of_connection(A,
                                     theta=0.25,
                                     block=None,
                                     norm='abs',
                                     cost=[0]):
    """
    Return a strength of connection matrix using the classical AMG measure
    An off-diagonal entry A[i,j] is a strong connection iff::

            | A[i,j] | >= theta * max(| A[i,k] |), where k != i     (norm='abs')
             -A[i,j]   >= theta * max(| A[i,k] |), where k != i     (norm='min')

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format
    theta : float
        Threshold parameter in [0,1].
    block : string, default None for CSR matrix and 'block' for BSR matrix
        How to treat block structure of A:
            None         : Compute SOC based on A as CSR matrix.
            'block'      : Compute SOC based on norm of blocks of A.
            'amalgamate' : Compute SOC based on A as CSR matrix, then compute
                           norm of blocks in SOC matrix for a block SOC. 
    norm : string, default 'abs'
        Option to compute SOC between elements or blocks:
            'abs'  : C_ij = k, where k is the maximum absolute value in block C_ij
            'min'  : C_ij = k, where k is the minimum (negative) value in block C_ij
            'fro'  : C_ij = k, where k is the Frobenius norm of block C_ij
                - Only valid for block matrices, block='block'

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    symmetric_strength_of_connection : symmetric measure used in SA
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
    - A symmetric A does not necessarily yield a symmetric strength matrix S
    - Calls C++ function classical_strength_of_connection

    References
    ----------

    .. [1] Briggs, W. L., Henson, V. E., McCormick, S. F., "A multigrid
       tutorial", Second edition. Society for Industrial and Applied
       Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
       ISBN: 0-89871-462-1

    .. [2] Trottenberg, U., Oosterlee, C. W., Schuller, A., "Multigrid",
       Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
       ISBN: 0-12-701070-X

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import classical_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = classical_strength_of_connection(A, 0.0)

    """
    if (theta < 0 or theta > 1):
        raise ValueError('expected theta in [0,1]')

    if sparse.isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
    else:
        blocksize = 1

    # Block structure considered before computing SOC
    if (block == 'block') or sparse.isspmatrix_bsr(A):
        R, C = A.blocksize
        if (R != C) or (R < 1):
            raise ValueError('Matrix must have square blocks')

        N = int(A.shape[0] / R)

        # SOC based on maximum absolute value element in each block
        if norm == 'abs':
            data = np.max(np.max(np.abs(A.data), axis=1), axis=1)
            cost[0] += 1
        # SOC based on hard minimum of entry in each off-diagonal block
        elif norm == 'min':
            data = np.min(np.min(A.data, axis=1), axis=1)
            cost[0] += 1
        # SOC based on Frobenius norms of blocks
        elif norm == 'fro':
            data = (np.conjugate(A.data) * A.data).reshape(-1,
                                                           R * C).sum(axis=1)
            cost[0] += 1
        else:
            raise ValueError("Invalid choice of norm.")

        data[np.abs(data) < 1e-16] = 0.0
        S_rowptr = np.empty_like(A.indptr)
        S_colinds = np.empty_like(A.indices)
        S_data = np.empty_like(data)

        if norm == 'abs' or norm == 'fro':
            amg_core.classical_strength_of_connection_abs(
                N, theta, A.indptr, A.indices, data, S_rowptr, S_colinds,
                S_data)
        elif norm == 'min':
            amg_core.classical_strength_of_connection_min(
                N, theta, A.indptr, A.indices, data, S_rowptr, S_colinds,
                S_data)
        else:
            raise ValueError("Unrecognized option for norm.")

        # One pass through nnz to find largest entry, one to filter
        S = sparse.csr_matrix((S_data, S_colinds, S_rowptr), shape=[N, N])
        cost[0] += 2

        # Take magnitude and scale by largest entry
        S.data = np.abs(S.data)
        S = scale_rows_by_largest_entry(S)
        S.eliminate_zeros()

        # Assume largest entry can be tracked from filtering.
        # 1 WU to scale matrix.
        cost[0] += float(S.nnz) / A.nnz

        return S

    # SOC computed based on A as CSR
    else:
        S_rowptr = np.empty_like(A.indptr)
        S_colinds = np.empty_like(A.indices)
        S_data = np.empty_like(A.data)

        if norm == 'abs' or norm == 'fro':
            amg_core.classical_strength_of_connection_abs(
                A.shape[0], theta, A.indptr, A.indices, A.data, S_rowptr,
                S_colinds, S_data)
        elif norm == 'min':
            amg_core.classical_strength_of_connection_min(
                A.shape[0], theta, A.indptr, A.indices, A.data, S_rowptr,
                S_colinds, S_data)
        else:
            raise ValueError("Unrecognized option for norm.")

        # One pass through nnz to find largest entry, one to filter
        S = sparse.csr_matrix((S_data, S_colinds, S_rowptr), shape=A.shape)
        cost[0] += 2

        if blocksize > 1 and block == 'amalgamate':
            S = amalgamate(S, blocksize, norm=norm)

        # Take magnitude and scale by largest entry
        S.data = np.abs(S.data)
        S = scale_rows_by_largest_entry(S)
        S.eliminate_zeros()

        # Assume largest entry can be tracked from filtering.
        # 1 WU to scale matrix.
        cost[0] += float(S.nnz) / A.nnz

        return S
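A short usage sketch of the block options, following this snippet's signature (the block and norm keywords; released pyamg versions may expose a different interface).  The Poisson matrix is converted to BSR purely to exercise the 'block' path.

from pyamg.gallery import poisson
from pyamg.strength import classical_strength_of_connection

A = poisson((8, 8), format='csr').tobsr(blocksize=(2, 2))
S = classical_strength_of_connection(A, theta=0.25, block='block', norm='fro')
print(S.shape)   # one row/column per 2x2 block: (32, 32)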
Example n. 53
def symmetric_strength_of_connection(A, theta=0):
    """
    Compute strength of connection matrix using the standard symmetric measure

    An off-diagonal connection A[i,j] is strong iff::

        abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )

    Parameters
    ----------
    A : csr_matrix
        Matrix graph defined in sparse format.  Entry A[i,j] describes the
        strength of edge [i,j]
    theta : float
        Threshold parameter (positive).

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    classical_strength_of_connection : classical measure used in Ruge-Stuben AMG
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
        - For vector problems, standard strength measures may produce
          undesirable aggregates.  A "block approach" from Vanek et al. is used
          to replace vertex comparisons with block-type comparisons.  A
          connection between nodes i and j in the block case is strong if::

          ||AB[i,j]|| >= theta * sqrt( ||AB[i,i]|| * ||AB[j,j]|| )

          where AB[k,l] is the matrix block (degrees of freedom) associated
          with nodes k and l and ||.|| is a matrix norm, such as the
          Frobenius norm.

    References
    ----------
    .. [1] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import symmetric_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = symmetric_strength_of_connection(A, 0.0)
    """

    if theta < 0:
        raise ValueError('expected a positive theta')

    if sparse.isspmatrix_csr(A):
        # if theta == 0:
        #     return A

        Sp = np.empty_like(A.indptr)
        Sj = np.empty_like(A.indices)
        Sx = np.empty_like(A.data)

        fn = amg_core.symmetric_strength_of_connection
        fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)

        S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)

    elif sparse.isspmatrix_bsr(A):
        M, N = A.shape
        R, C = A.blocksize

        if R != C:
            raise ValueError('matrix must have square blocks')

        if theta == 0:
            data = np.ones(len(A.indices), dtype=A.dtype)
            S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
                                  shape=(int(M / R), int(N / C)))
        else:
            # the strength of connection matrix is based on the
            # Frobenius norms of the blocks
            data = (np.conjugate(A.data) * A.data).reshape(-1, R*C).sum(axis=1)
            A = sparse.csr_matrix((data, A.indices, A.indptr),
                                  shape=(int(M / R), int(N / C)))
            return symmetric_strength_of_connection(A, theta)
    else:
        raise TypeError('expected csr_matrix or bsr_matrix')

    # Strength represents "distance", so take the magnitude
    S.data = np.abs(S.data)

    # Scale S by the largest magnitude entry in each row
    S = scale_rows_by_largest_entry(S)

    return S
Example n. 54
def distance_strength_of_connection(A,
                                    V,
                                    theta=2.0,
                                    relative_drop=True,
                                    cost=[0]):
    """
    Distance based strength-of-connection

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format
    V : array
        Coordinates of the vertices of the graph of A
    theta : float
        Drop tolerance of distance between points, see relative_drop
    relative_drop : bool
        If false, then a connection must be within a distance of theta
        from a point to be strongly connected.
        If true, then the closest connection is always strong, and other points
        must be within theta times the smallest distance to be strong

    Returns
    -------
    C : csr_matrix
        C(i,j) = distance(point_i, point_j)
        Strength of connection matrix where strength values are
        distances, i.e. the smaller the value, the stronger the connection.
        Sparsity pattern of C is copied from A.

    Notes
    -----
    - theta is a drop tolerance that is applied row-wise
    - If a BSR matrix given, then the return matrix is still CSR.  The strength
      is given between super nodes based on the BSR block size.

    Examples
    --------
    >>> from pyamg.gallery import load_example
    >>> from pyamg.strength import distance_strength_of_connection
    >>> data = load_example('airfoil')
    >>> A = data['A'].tocsr()
    >>> S = distance_strength_of_connection(data['A'], data['vertices'])

    """
    # Amalgamate for the supernode case
    if sparse.isspmatrix_bsr(A):
        sn = int(A.shape[0] / A.blocksize[0])
        u = np.ones((A.data.shape[0], ))
        A = sparse.csr_matrix((u, A.indices, A.indptr), shape=(sn, sn))

    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)

    dim = V.shape[1]

    # Create two arrays for differencing the different coordinates such
    # that C(i,j) = distance(point_i, point_j)
    cols = A.indices
    rows = np.repeat(np.arange(A.shape[0]), A.indptr[1:] - A.indptr[0:-1])

    # Insert difference for each coordinate into C
    C = (V[rows, 0] - V[cols, 0])**2
    for d in range(1, dim):
        C += (V[rows, d] - V[cols, d])**2
    C = np.sqrt(C)
    C[C < 1e-6] = 1e-6

    C = sparse.csr_matrix((C, A.indices.copy(), A.indptr.copy()),
                          shape=A.shape)

    # 2 len(rows) operations initially, 3 each loop iteration,
    # and one after --> 3*dim*len(rows) / A.nnz WUs = 3*dim WUs
    cost[0] += 3 * dim

    # Apply drop tolerance
    if relative_drop is True:
        if theta != np.inf:
            amg_core.apply_distance_filter(C.shape[0], theta, C.indptr,
                                           C.indices, C.data)
            cost[0] += float(2.0 * C.nnz) / A.nnz
    else:
        amg_core.apply_absolute_distance_filter(C.shape[0], theta, C.indptr,
                                                C.indices, C.data)
        cost[0] += float(C.nnz) / A.nnz

    C.eliminate_zeros()

    C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
    cost[0] += float(C.shape[0]) / A.nnz

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the distances.
    C.data = 1.0 / C.data
    cost[0] += float(C.nnz) / A.nnz

    # Scale C by the largest magnitude entry in each row
    C = scale_rows_by_largest_entry(C)

    # Assume largest entry can be tracked in applying distance filter.
    # 1 WU to scale matrix.
    cost[0] += float(C.nnz) / A.nnz

    return C
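A minimal usage sketch for the cost-tracking variant above, assuming this distance_strength_of_connection is in scope; the cost list is mutated in place and reports work in multiples of A.nnz:

import numpy as np
from pyamg.gallery import poisson

A = poisson((10, 10), format='csr')          # 2D Poisson matrix on a 10x10 grid
X, Y = np.meshgrid(np.arange(10.0), np.arange(10.0))
V = np.vstack((X.ravel(), Y.ravel())).T      # one coordinate row per vertex
cost = [0]
S = distance_strength_of_connection(A, V, theta=2.0, cost=cost)
print(S.nnz, cost[0])                        # pattern size and accumulated work units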
Esempio n. 55
0
def distance_strength_of_connection(A, V, theta=2.0, relative_drop=True):
    """
    Distance-based strength-of-connection

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format
    V : array
        Coordinates of the vertices of the graph of A
    theta : float
        Drop tolerance of distance between points, see relative_drop
    relative_drop : bool
        If false, then a connection must be within a distance of theta
        from a point to be strongly connected.
        If true, then the closest connection is always strong, and other points
        must be within theta times the smallest distance to be strong

    Returns
    -------
    C : csr_matrix
        C(i,j) = distance(point_i, point_j)
        Strength of connection matrix where strength values are
        distances, i.e. the smaller the value, the stronger the connection.
        Sparsity pattern of C is copied from A.

    Notes
    -----
    - theta is a drop tolerance that is applied row-wise
    - If a BSR matrix given, then the return matrix is still CSR.  The strength
      is given between super nodes based on the BSR block size.

    Examples
    --------
    >>> from pyamg.gallery import load_example
    >>> from pyamg.strength import distance_strength_of_connection
    >>> data = load_example('airfoil')
    >>> A = data['A'].tocsr()
    >>> S = distance_strength_of_connection(data['A'], data['vertices'])

    """
    # Amalgamate for the supernode case
    if sparse.isspmatrix_bsr(A):
        sn = int(A.shape[0] / A.blocksize[0])
        u = np.ones((A.data.shape[0],))
        A = sparse.csr_matrix((u, A.indices, A.indptr), shape=(sn, sn))

    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)

    dim = V.shape[1]

    # Create two arrays for differencing the different coordinates such
    # that C(i,j) = distance(point_i, point_j)
    cols = A.indices
    rows = np.repeat(np.arange(A.shape[0]), A.indptr[1:] - A.indptr[0:-1])

    # Insert difference for each coordinate into C
    C = (V[rows, 0] - V[cols, 0])**2
    for d in range(1, dim):
        C += (V[rows, d] - V[cols, d])**2
    C = np.sqrt(C)
    C[C < 1e-6] = 1e-6

    C = sparse.csr_matrix((C, A.indices.copy(), A.indptr.copy()),
                          shape=A.shape)

    # Apply drop tolerance
    if relative_drop is True:
        if theta != np.inf:
            amg_core.apply_distance_filter(C.shape[0], theta, C.indptr,
                                           C.indices, C.data)
    else:
        amg_core.apply_absolute_distance_filter(C.shape[0], theta, C.indptr,
                                                C.indices, C.data)
    C.eliminate_zeros()

    C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the distances.
    C.data = 1.0/C.data

    # Scale C by the largest magnitude entry in each row
    C = scale_rows_by_largest_entry(C)

    return C
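amg_core.apply_distance_filter is a compiled kernel, so the relative drop test it applies is worth spelling out. Below is a rough pure-NumPy sketch, under the assumption that the real routine excludes the diagonal from the row minimum (the diagonal distance was clamped to 1e-6 above, so including it would drop everything); an off-diagonal entry survives only if its distance is within theta times the smallest off-diagonal distance in its row:

import numpy as np

def relative_distance_filter_sketch(indptr, indices, data, theta):
    # Zero out entries that fail the relative drop test; a subsequent
    # eliminate_zeros() then removes them from the sparsity pattern.
    for i in range(len(indptr) - 1):
        start, end = indptr[i], indptr[i + 1]
        offdiag = indices[start:end] != i
        if not offdiag.any():
            continue
        dmin = data[start:end][offdiag].min()
        drop = offdiag & (data[start:end] > theta * dmin)
        data[start:end][drop] = 0.0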
Esempio n. 56
0
def evolution_strength_of_connection(A,
                                     B=None,
                                     epsilon=4.0,
                                     k=2,
                                     proj_type="l2",
                                     weighting='diagonal',
                                     symmetrize_measure=True,
                                     cost=[0]):
    """
    Construct a strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {None, array}
        If B=None, then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        Number of ODE time steps; the step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the D-inverse
        used to precondition A before "evolving" delta-functions.  The
        local option is the cheapest.
    symmetrize_measure : boolean
        If True, symmetrize the measure as 0.5*(Atilde + Atilde.T)
    cost : list
        Accumulator; cost[0] is incremented by the work performed here,
        measured in work units of A.nnz

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon >= 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B is None:
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Is matrix A CSR?
    if (not sparse.isspmatrix_csr(A)):
        numPDEs = A.blocksize[0]
        csrflag = False
    else:
        numPDEs = 1
        csrflag = True

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's, have
    # sorted indices and be scaled by D-inverse
    if weighting == 'block':
        Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
        Dinv_A = (Dinv * A).tocsr()
        cost[0] += 1
    elif weighting == 'diagonal':
        D = A.diagonal()
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    elif weighting == 'local':
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    else:
        raise ValueError('Unrecognized weighting for Evolution measure')

    A = A.tocsr()
    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    if weighting == 'diagonal' or weighting == 'block':
        # Get spectral radius of Dinv*A, scales the time step size for the ODE
        rho_DinvA = approximate_spectral_radius(Dinv_A)
        cost[0] += 15  # 15 lanczos iterations to approximate spectral radius
    else:
        # Using local weighting, no need for spectral radius
        rho_DinvA = 1.0

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D = A.diagonal()
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    # We transpose the product, so that we can efficiently access
    # the columns in CSR format.  We want the columns (not rows) because
    # strength is based on the columns of (I - delta_t Dinv A)^k, i.e.,
    # relaxed delta functions

    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = Atilde.T.tocsr()
    cost[0] += 1

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        JacobiStep = sparse.csr_matrix(Atilde, copy=True)
        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        for i in range(ninc):
            cost[0] += mat_mat_complexity(Atilde, JacobiStep)
            Atilde = Atilde * JacobiStep

        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()
        cost[0] += Atilde.nnz / float(A.nnz)

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare - 1):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)
        cost[0] += mat_mat_complexity(Atilde, mask, incomplete=True) / float(
            A.nnz)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an
        #   easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling
        cost[0] += Atilde.shape[0] / float(A.nnz)

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))
        cost[0] += 2.0 * Atilde.nnz / float(A.nnz)

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)
        cost[0] += Atilde.nnz / float(A.nnz)
        if np.iscomplexobj(Atilde.data):
            cost[0] += Atilde.nnz / float(A.nnz)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data / data
        cost[0] += Atilde.nnz / float(A.nnz)

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)
        cost[0] += Atilde.nnz / float(A.nnz)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1
                cost[0] += B.shape[0] / float(A.nnz)

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

        # Use constrained min problem to define strength.
        # This function is doing similar to NullDim=1 with more bad guys.
        # Complexity accounts for computing the block inverse, and
        #   hat{z_i} = B_i*x, hat{z_i} .* hat{z_i},
        #   hat{z_i} = hat{z_i} / z_i, and abs(1.0 - hat{z_i}).
        cost[0] += (Atilde.nnz * (3 + NullDim) +
                    (NullDim**3) * dimen) / float(A.nnz)
        amg_core.evolution_strength_helper(
            Atilde.data, Atilde.indptr, Atilde.indices, Atilde.shape[0],
            np.ravel(np.asarray(B)),
            np.ravel(np.asarray((D_A * np.conjugate(B)).T)),
            np.ravel(np.asarray(BDB)), BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        cost[0] += Atilde.nnz / float(A.nnz)
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I
    cost[0] += Atilde.shape[0] / float(A.nnz)

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks, ))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data
    cost[0] += Atilde.nnz / float(A.nnz)

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)
    cost[0] += Atilde.nnz / float(A.nnz)

    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5 * (Atilde + Atilde.T)
        cost[0] += Atilde.nnz / float(A.nnz)

    return Atilde
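A short usage sketch for the cost-instrumented evolution measure above (assuming this evolution_strength_of_connection is in scope). The 'local' weighting skips the spectral-radius estimate, which the docstring notes is the cheapest option:

import numpy as np
from pyamg.gallery import poisson

A = poisson((50, 50), format='csr')
B = np.ones((A.shape[0], 1))                 # constant near-nullspace candidate
cost = [0]
S = evolution_strength_of_connection(A, B, epsilon=4.0, k=2,
                                     weighting='local', cost=cost)
print(S.nnz, cost[0])                        # pattern size and work units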
Esempio n. 57
0
def energy_based_strength_of_connection(A, theta=0.0, k=2):
    """
    Compute a strength of connection matrix using an energy-based measure.

    Parameters
    ----------
    A : {sparse-matrix}
        matrix from which to generate strength of connection information
    theta : {float}
        Threshold parameter in [0,1]
    k : {int}
        Number of relaxation steps used to generate strength information

    Returns
    -------
    S : {csr_matrix}
        Matrix graph defining strong connections.  The sparsity pattern
        of S matches that of A.  For BSR matrices, S is a reduced strength
        of connection matrix that describes connections between supernodes.

    Notes
    -----
    This method relaxes with weighted-Jacobi in order to approximate the
    matrix inverse.  A normalized change of energy is then used to define
    point-wise strength of connection values.  Specifically, let v be the
    approximation to the i-th column of the inverse, then

    (S_ij)^2 = <v_j, v_j>_A / <v, v>_A,

    where v_j equals v, except that entry j has been zeroed out.  As is common,
    larger values imply a stronger connection.

    The current implementation is a very slow pure-Python version intended
    for experimental purposes only.

    References
    ----------
    .. [1] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
       "An Energy-Based AMG Coarsening Strategy",
       Numerical Linear Algebra with Applications,
       vol. 13, pp. 133-148, 2006.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import energy_based_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = energy_based_strength_of_connection(A, 0.0)
    """

    if theta < 0:
        raise ValueError('expected a nonnegative theta')
    if not sparse.isspmatrix(A):
        raise ValueError('expected sparse matrix')
    if not isinstance(k, int):
        raise ValueError('expected an integer number of steps')
    if k < 0:
        raise ValueError('expected a nonnegative number of steps')

    if sparse.isspmatrix_bsr(A):
        bsr_flag = True
        numPDEs = A.blocksize[0]
        if A.blocksize[0] != A.blocksize[1]:
            raise ValueError('expected square blocks in BSR matrix A')
    else:
        bsr_flag = False

    # Convert A to csc and Atilde to csr
    if sparse.isspmatrix_csr(A):
        Atilde = A.copy()
        A = A.tocsc()
    else:
        A = A.tocsc()
        Atilde = A.copy()
        Atilde = Atilde.tocsr()

    # Calculate the weighted-Jacobi parameter
    from pyamg.util.linalg import approximate_spectral_radius
    D = A.diagonal()
    Dinv = 1.0 / D
    Dinv[D == 0] = 0.0
    Dinv = sparse.csc_matrix((Dinv, (np.arange(A.shape[0]),
                             np.arange(A.shape[1]))), shape=A.shape)
    DinvA = Dinv*A
    omega = 1.0/approximate_spectral_radius(DinvA)
    del DinvA

    # Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
    S = sparse.csc_matrix(A.shape, dtype=A.dtype)  # empty matrix
    I = sparse.eye(A.shape[0], A.shape[1], format='csc')
    for i in range(k+1):
        S = S + omega*(Dinv*(I - A * S))

    # Calculate the strength entries in S column-wise, but only strength
    # values at the sparsity pattern of A
    for i in range(Atilde.shape[0]):
        v = np.mat(S[:, i].todense())
        Av = np.mat(A * v)
        denom = np.sqrt(np.conjugate(v).T * Av)
        # replace entries in row i with strength values
        for j in range(Atilde.indptr[i], Atilde.indptr[i+1]):
            col = Atilde.indices[j]
            vj = v[col].copy()
            v[col] = 0.0
            #   =  (||v_j||_A - ||v||_A) / ||v||_A
            val = np.sqrt(np.conjugate(v).T * A * v)/denom - 1.0

            # Negative values generally imply a weak connection
            if val > -0.01:
                Atilde.data[j] = abs(val)
            else:
                Atilde.data[j] = 0.0

            v[col] = vj

    # Apply drop tolerance
    Atilde = classical_strength_of_connection(Atilde, theta=theta)
    Atilde.eliminate_zeros()

    # Put ones on the diagonal
    Atilde = Atilde + I.tocsr()
    Atilde.sort_indices()

    # Amalgamate Atilde for the BSR case, using ones for all strong connections
    if bsr_flag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
        nblocks = Atilde.indices.shape[0]
        uone = np.ones((nblocks,))
        Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
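The core of the energy-based measure above is the weighted-Jacobi recurrence S <- S + omega*Dinv*(I - A*S), which drives S toward A^-1. A dense-NumPy sanity sketch of that recurrence (an illustration of the iteration, not the library code):

import numpy as np

A = np.array([[ 4.0, -1.0,  0.0],
              [-1.0,  4.0, -1.0],
              [ 0.0, -1.0,  4.0]])
Dinv = np.diag(1.0 / np.diag(A))
omega = 2.0 / 3.0                    # damped Jacobi weight; converges here
S = np.zeros_like(A)                 # zero initial guess for A^-1
for _ in range(25):
    S = S + omega * Dinv @ (np.eye(3) - A @ S)
print(np.linalg.norm(S - np.linalg.inv(A)))  # small residual: S ~ A^-1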
Esempio n. 58
0
def smoothed_aggregation_solver(A,
                                B=None,
                                BH=None,
                                symmetry='hermitian',
                                strength='symmetric',
                                aggregate='standard',
                                smooth=('jacobi', {
                                    'omega': 4.0 / 3.0
                                }),
                                presmoother=('block_gauss_seidel', {
                                    'sweep': 'symmetric'
                                }),
                                postsmoother=('block_gauss_seidel', {
                                    'sweep': 'symmetric'
                                }),
                                improve_candidates=[('block_gauss_seidel', {
                                    'sweep': 'symmetric',
                                    'iterations': 4
                                }), None],
                                max_levels=10,
                                max_coarse=10,
                                diagonal_dominance=False,
                                keep=False,
                                **kwargs):
    """Create a multilevel solver using classical-style Smoothed Aggregation (SA).

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Sparse NxN matrix in CSR or BSR format

    B : None, array_like
        Right near-nullspace candidates stored in the columns of an NxK array.
        The default value B=None is equivalent to B=ones((N,1))

    BH : None, array_like
        Left near-nullspace candidates stored in the columns of an NxK array.
        BH is only used if symmetry is 'nonsymmetric'.
        The default value BH=None is equivalent to BH=B.copy()

    symmetry : string
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        'nonsymmetric' i.e. nonsymmetric in a hermitian sense
        Note, in the strictly real case, symmetric and hermitian are the same.
        Note, this flag does not denote definiteness of the operator.

    strength : string or list
        Method used to determine the strength of connection between unknowns of
        the linear system.  Method-specific parameters may be passed in using a
        tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
        all nonzero entries of the matrix are considered strong.
        Choose from 'symmetric', 'classical', 'evolution', 'algebraic_distance',
        'affinity', ('predefined', {'C' : csr_matrix}), None

    aggregate : string or list
        Method used to aggregate nodes.
        Choose from 'standard', 'lloyd', 'naive',
        ('predefined', {'AggOp' : csr_matrix})

    smooth : list
        Method used to smooth the tentative prolongator.  Method-specific
        parameters may be passed in using a tuple, e.g.  smooth=
        ('jacobi',{'filter' : True }).
        Choose from 'jacobi', 'richardson', 'energy', None

    presmoother : tuple, string, list
        Defines the presmoother for the multilevel cycling.  The default block
        Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
        is CSR or is a BSR matrix with blocksize of 1.

    postsmoother : tuple, string, list
        Same as presmoother, except defines the postsmoother.

    improve_candidates : tuple, string, list
        The ith entry defines the method used to improve the candidates B on
        level i.  If the list is shorter than max_levels, then the last entry
        will define the method for all levels lower.  If tuple or string, then
        this single relaxation descriptor defines improve_candidates on all
        levels.
        The list elements are relaxation descriptors of the form used for
        presmoother and postsmoother.  A value of None implies no action on B.

    max_levels : integer
        Maximum number of levels to be used in the multilevel solver.

    max_coarse : integer
        Maximum number of variables permitted on the coarse grid.

    diagonal_dominance : bool, tuple
        If True (or the first tuple entry is True), then avoid coarsening
        diagonally dominant rows.  The second tuple entry requires a
        dictionary, where the key value 'theta' is used to tune the diagonal
        dominance threshold.

    keep : bool
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics.  For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Other Parameters
    ----------------
    cycle_type : ['V','W','F']
        Structure of multigrid cycle

    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.

    Returns
    -------
    ml : multilevel_solver
        Multigrid hierarchy of matrices and prolongation operators

    See Also
    --------
    multilevel_solver, classical.ruge_stuben_solver,
    aggregation.smoothed_aggregation_solver

    Notes
    -----
        - This method implements classical-style SA, not root-node style SA
          (see aggregation.rootnode_solver).

        - The additional parameters are passed through as arguments to
          multilevel_solver.  Refer to pyamg.multilevel_solver for additional
          documentation.

        - At each level, four steps are executed in order to define the coarser
          level operator.

          1. Matrix A is given and used to derive a strength matrix, C.

          2. Based on the strength matrix, indices are grouped or aggregated.

          3. The aggregates define coarse nodes and a tentative prolongation
             operator T is defined by injection

          4. The tentative prolongation operator is smoothed by a relaxation
             scheme to improve the quality and extent of interpolation from the
             aggregates to fine nodes.

        - The parameters smooth, strength, aggregate, presmoother, postsmoother
          can be varied on a per level basis.  For different methods on
          different levels, use a list as input so that the i-th entry defines
          the method at the i-th level.  If there are more levels in the
          hierarchy than list entries, the last entry will define the method
          for all levels lower.

          Examples are:
          smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
          presmoother=[('block_gauss_seidel', {'sweep':'symmetric'}), 'sor']
          aggregate=['standard', 'naive']
          strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]

        - Predefined strength of connection and aggregation schemes can be
          specified.  These options are best used together, but aggregation can
          be predefined while strength of connection is not.

          For predefined strength of connection, use a list consisting of
          tuples of the form ('predefined', {'C' : C0}), where C0 is a
          csr_matrix and each degree-of-freedom in C0 represents a supernode.
          For instance to predefine a three-level hierarchy, use
          [('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].

          Similarly for predefined aggregation, use a list of tuples.  For
          instance to predefine a three-level hierarchy, use [('predefined',
          {'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
          dimensions of A, Agg0 and Agg1 are compatible, i.e.  Agg0.shape[1] ==
          A.shape[0] and Agg1.shape[1] == Agg0.shape[0].  Each AggOp is a
          csr_matrix.

    Examples
    --------
    >>> from pyamg import smoothed_aggregation_solver
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse.linalg import cg
    >>> import numpy as np
    >>> A = poisson((100,100), format='csr')           # matrix
    >>> b = np.ones((A.shape[0]))                      # RHS
    >>> ml = smoothed_aggregation_solver(A)            # AMG solver
    >>> M = ml.aspreconditioner(cycle='V')             # preconditioner
    >>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M)   # solve with CG

    References
    ----------
    .. [1996VaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
        except BaseException:
            raise TypeError(
                'Argument A must have type csr_matrix or bsr_matrix, or be convertible to csr_matrix'
            )

    A = A.asfptype()

    if (symmetry != 'symmetric') and (symmetry != 'hermitian') and\
            (symmetry != 'nonsymmetric'):
        raise ValueError(
            'expected \'symmetric\', \'nonsymmetric\' or \'hermitian\' for the symmetry parameter '
        )
    A.symmetry = symmetry

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Right near nullspace candidates use constant for each variable as default
    if B is None:
        B = np.kron(
            np.ones((int(A.shape[0] / blocksize(A)), 1), dtype=A.dtype),
            np.eye(blocksize(A), dtype=A.dtype))
    else:
        B = np.asarray(B, dtype=A.dtype)
        if len(B.shape) == 1:
            B = B.reshape(-1, 1)
        if B.shape[0] != A.shape[0]:
            raise ValueError(
                'The near null-space modes B have incorrect dimensions for matrix A'
            )
        if B.shape[1] < blocksize(A):
            warn(
                'Having fewer target vectors, B.shape[1], than the blocksize of A can degrade convergence factors.'
            )

    # Left near nullspace candidates
    if A.symmetry == 'nonsymmetric':
        if BH is None:
            BH = B.copy()
        else:
            BH = np.asarray(BH, dtype=A.dtype)
            if len(BH.shape) == 1:
                BH = BH.reshape(-1, 1)
            if BH.shape[1] != B.shape[1]:
                raise ValueError(
                    'The number of left and right near null-space modes, B and BH, must be equal'
                )
            if BH.shape[0] != A.shape[0]:
                raise ValueError(
                    'The near null-space modes BH have incorrect dimensions for matrix A'
                )

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    improve_candidates =\
        levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Construct multilevel structure
    levels = []
    levels.append(multilevel_solver.level())
    levels[-1].A = A  # matrix

    # Append near nullspace candidates
    levels[-1].B = B  # right candidates
    if A.symmetry == 'nonsymmetric':
        levels[-1].BH = BH  # left candidates

    while len(levels) < max_levels and\
            int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:
        extend_hierarchy(levels, strength, aggregate, smooth,
                         improve_candidates, diagonal_dominance, keep)

    ml = multilevel_solver(levels, **kwargs)
    change_smoothers(ml, presmoother, postsmoother)
    return ml
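A short sketch of the per-level parameter lists described in the Notes above, assuming the standard pyamg entry point; the last list entry is reused for all coarser levels:

import numpy as np
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson

A = poisson((100, 100), format='csr')
ml = smoothed_aggregation_solver(
    A,
    strength=[('symmetric', {'theta': 0.25}),    # level 0
              ('symmetric', {'theta': 0.08})],   # level 1 and coarser
    aggregate=['standard', 'naive'],             # per-level aggregation
    presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}))
b = np.ones(A.shape[0])
x = ml.solve(b, tol=1e-8)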
Esempio n. 59
0
def evolution_strength_of_connection(A, B='ones', epsilon=4.0, k=2,
                                     proj_type="l2", block_flag=False,
                                     symmetrize_measure=True):
    """
    Construct a strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B='ones', then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        Number of ODE time steps; the step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    block_flag : {boolean}
        If True, use a block D-inverse as preconditioner for A during
        weighted-Jacobi
    symmetrize_measure : {boolean}
        If True, symmetrize the measure as 0.5*(Atilde + Atilde.T)

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon >= 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if isinstance(B, str) and B == 'ones':
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's and have
    # sorted indices
    if (not sparse.isspmatrix_csr(A)):
        csrflag = False
        numPDEs = A.blocksize[0]
        D = A.diagonal()
        # Calculate Dinv*A
        if block_flag:
            Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
            Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
                                     np.arange(Dinv.shape[0] + 1)),
                                     shape=A.shape)
            Dinv_A = (Dinv * A).tocsr()
        else:
            Dinv = np.zeros_like(D)
            mask = (D != 0.0)
            Dinv[mask] = 1.0 / D[mask]
            Dinv[D == 0] = 1.0
            Dinv_A = scale_rows(A, Dinv, copy=True)
        A = A.tocsr()
    else:
        csrflag = True
        numPDEs = 1
        D = A.diagonal()
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)

    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this will be used to scale the time step
    # size for the ODE
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    #      In order to later access columns, we calculate the transpose in
    #      CSR format so that columns will be accessed efficiently
    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0/rho_DinvA)*Dinv_A)
    Atilde = Atilde.T.tocsr()

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(range(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            Atilde = Atilde*Atilde

        JacobiStep = (I - (1.0/rho_DinvA)*Dinv_A).T.tocsr()
        for i in range(ninc):
            Atilde = Atilde*JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare-1):
            Atilde = Atilde*Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an
        #   easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data/data

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(range(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]

        # Use constrained min problem to define strength
        amg_core.evolution_strength_helper(Atilde.data,
                                           Atilde.indptr,
                                           Atilde.indices,
                                           Atilde.shape[0],
                                           np.ravel(np.asarray(B)),
                                           np.ravel(np.asarray(
                                               (D_A * np.conjugate(B)).T)),
                                           np.ravel(np.asarray(BDB)),
                                           BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if symmetrize_measure:
        Atilde = 0.5*(Atilde + Atilde.T)

    if epsilon != np.inf:
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0]*Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks,))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0/Atilde.data

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
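Both evolution variants split the k time steps into nsquare repeated squarings plus ninc incremental multiplies, so Atilde^k costs roughly log2(k) products when k is a power of two. A tiny numeric check of that decomposition:

import numpy as np

for k in (1, 2, 3, 6, 8, 13):
    nsquare = int(np.log2(k))    # number of repeated squarings
    ninc = k - 2**nsquare        # leftover incremental steps
    # Atilde^k = (Atilde^(2^nsquare)) * (Atilde^ninc)
    assert 2**nsquare + ninc == k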