Example #1
def rho_D_inv_A(A):
    """Return the (approx.) spectral radius of D^-1 * A.

    Parameters
    ----------
    A : sparse-matrix

    Returns
    -------
    approximate spectral radius of diag(A)^{-1} A

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_D_inv_A
    >>> from scipy.sparse import csr_matrix
    >>> import numpy as np
    >>> A = csr_matrix(np.array([[1.0,0,0],[0,2.0,0],[0,0,3.0]]))
    >>> print(rho_D_inv_A(A))
    1.0

    """
    if not hasattr(A, 'rho_D_inv'):
        D_inv = get_diagonal(A, inv=True)
        D_inv_A = scale_rows(A, D_inv, copy=True)
        A.rho_D_inv = approximate_spectral_radius(D_inv_A)

    return A.rho_D_inv
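Note that the result is cached as an attribute on A itself, so repeated calls cost nothing until the cache is invalidated. A minimal usage sketch (assuming the pyamg helpers used above are importable):

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.diag([1.0, 2.0, 3.0]))
r1 = rho_D_inv_A(A)      # computes and caches A.rho_D_inv
r2 = rho_D_inv_A(A)      # returns the cached value without recomputing
# If A's entries change, drop the stale cache before calling again:
if hasattr(A, 'rho_D_inv'):
    del A.rho_D_inv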
Example #2
def setup_richardson(lvl, iterations=1, omega=1.0):
    omega = omega/approximate_spectral_radius(lvl.A)

    def smoother(A, x, b):
        relaxation.polynomial(A, x, b, coefficients=[omega],
                              iterations=iterations)
    return smoother
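For reference, a minimal sketch (not pyamg's implementation) of what one pass of relaxation.polynomial does: x <- x + p(A)(b - Ax), with coefficients given highest degree first, so coefficients=[omega] reduces to the damped Richardson sweep x <- x + omega*(b - Ax).

import numpy as np

def polynomial_sweep(A, x, b, coefficients):
    # Sketch of one polynomial relaxation pass via Horner's scheme.
    r = b - A @ x
    h = coefficients[0] * r
    for c in coefficients[1:]:
        h = c * r + A @ h        # Horner step: h <- c*r + A*h
    return x + h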
Example #3
def rho_D_inv_A(A):
    """
    Return the (approx.) spectral radius of D^-1 * A

    Parameters
    ----------
    A : {sparse-matrix}

    Returns
    -------
    approximate spectral radius of diag(A)^{-1} A

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_D_inv_A
    >>> from scipy.sparse import csr_matrix
    >>> import numpy as np
    >>> A = csr_matrix(np.array([[1.0,0,0],[0,2.0,0],[0,0,3.0]]))
    >>> print(rho_D_inv_A(A))
    1.0
    """

    if not hasattr(A, 'rho_D_inv'):
        D_inv = get_diagonal(A, inv=True)
        D_inv_A = scale_rows(A, D_inv, copy=True)
        A.rho_D_inv = approximate_spectral_radius(D_inv_A)

    return A.rho_D_inv
Example #4
def setup_richardson(lvl, iterations=DEFAULT_NITER, omega=1.0):
    omega = omega/approximate_spectral_radius(lvl.A)

    def smoother(A, x, b):
        relaxation.polynomial(A, x, b, coefficients=[omega],
                              iterations=iterations)
    return smoother
Example #5
def approx_spectral_radius(M, pyamg=False, symmetric=False, tol=1e-04):
    """pyamg=False ... DEPRECATED
    Wrapper around existing methods to calculate spectral radius.
    1. Original method: function 'pyamg.util.linalg.approximate_spectral_radius'.
    Behaved strange at times, and packages needed time to import and returned errors.
    But kept as default since the other method from scipy sometimes gives wrong results!
    2. 'scipy.sparse.linalg.eigs' which seemed to work faster and apparently more reliably than the old method.
    However, it sometimes does not return the correct value!
    This happens when echo=True and the biggest value is negative. Then returns the next smaller positive.
    For example: returns 0.908 for [ 0.9089904+0.j -1.0001067+0.j], or 0.933 for [ 0.93376532+0.j -1.03019369+0.j]

    http://scicomp.stackexchange.com/questions/7369/what-is-the-fastest-way-to-compute-all-eigenvalues-of-a-very-big-and-sparse-adja
    http://www.netlib.org/utk/people/JackDongarra/etemplates/node138.html

    Both methods require matrix to have float entries (asfptype)
    Testing: scipy is faster up to at least graphs with 600k edges
    10k nodes, 100k edges: pyamp 0.4 sec, scipy: 0.04
    60k nodes, 600k edges: pyam 2 sec, scipy: 1 sec

    Allows both sparse matrices and numpy arrays: For both, transforms int into float structure.
    However, numpy astype makes a copy (optional attribute copy=False does not work for scipy.csr_matrix)

    'eigsh' is not consistently faster than 'eigs' for symmetric M
    """
    pyamg = False  # force the scipy path; the pyamg branch is deprecated (see docstring)
    if pyamg:
        return approximate_spectral_radius(M.astype('float'), tol=tol, maxiter=20, restart=10)
    else:
        return np.absolute(eigs(M.astype('float'), k=1, return_eigenvectors=False, which='LM', tol=tol)[0])   # which='LM': largest magnitude; eigs / eigsh
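One hedged workaround for the eigs pitfall described in the docstring: request more than one eigenvalue and take the maximum magnitude, so a dominant negative eigenvalue is not skipped.

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigs

# Diagonal matrix whose largest-magnitude eigenvalue is negative,
# mirroring the failure case quoted above.
M = diags([0.9089904, -1.0001067, 0.5, 0.3, 0.2, 0.1]).tocsr()
vals = eigs(M.astype('float'), k=2, return_eigenvectors=False, which='LM')
print(np.abs(vals).max())   # ~1.0001 rather than 0.9090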
Example #6
def setup_chebyshev(lvl, lower_bound=1.0/30.0, upper_bound=1.1, degree=3, iterations=1):
    rho = approximate_spectral_radius(lvl.A)
    a = rho * lower_bound
    b = rho * upper_bound
    coefficients = -chebyshev_polynomial_coefficients(a, b, degree)[:-1] # drop the constant coefficient
    def smoother(A,x,b):
        relaxation.polynomial(A, x, b, coefficients=coefficients, iterations=iterations)
    return smoother
Example #7
def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
    """
    Calculate local aggregate quantities

    Return a vector of length num_aggregates where entry i is
    (card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
    """
    rho = approximate_spectral_radius(A)
    zAz = np.dot(z.reshape(1, -1), A * z.reshape(-1, 1))
    card = nPDEs * (AggOp.indptr[1:] - AggOp.indptr[:-1])
    weights = (np.ravel(card) * zAz) / (A.shape[0] * rho)
    return weights.reshape(-1, 1)
Example #8
def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
    """
    Calculate local aggregate quantities

    Return a vector of length num_aggregates where entry i is
    (card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
    """
    rho = approximate_spectral_radius(A)
    zAz = numpy.dot(z.reshape(1, -1), A*z.reshape(-1, 1))
    card = nPDEs*(AggOp.indptr[1:]-AggOp.indptr[:-1])
    weights = (numpy.ravel(card)*zAz)/(A.shape[0]*rho)
    return weights.reshape(-1, 1)
Example #9
def setup_chebyshev(lvl, lower_bound=1.0/30.0, upper_bound=1.1, degree=3,
                    iterations=1):
    rho = approximate_spectral_radius(lvl.A)
    a = rho * lower_bound
    b = rho * upper_bound
    # drop the constant coefficient
    coefficients = -chebyshev_polynomial_coefficients(a, b, degree)[:-1]

    def smoother(A, x, b):
        relaxation.polynomial(A, x, b, coefficients=coefficients,
                              iterations=iterations)
    return smoother
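A quick, hedged illustration of the default bounds: for a 2-D Poisson matrix, rho(A) is close to 8, so [a, b] is roughly [0.26, 8.6] and the Chebyshev smoother targets the upper part of the spectrum.

from pyamg.gallery import poisson
from pyamg.util.linalg import approximate_spectral_radius

A = poisson((10, 10), format='csr')
rho = approximate_spectral_radius(A)    # about 7.8 for this grid
a, b = rho * (1.0 / 30.0), rho * 1.1    # the default lower/upper bounds
print(a, b)                             # roughly 0.26 and 8.6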
Example #10
def eps_convergence_linbp(Hc, W, echo=False, rho_W=None):
    """Calculates eps_convergence with which to multiply Hc, for LinBP (with or w/o echo) to be at convergence boundary.
    Returns 0 if the values HC are too small (std < SPECTRAL_TOLERANCE)
    Assumes symmetric W and symmetric and doubly stochastic Hc

    Uses: degree_matrix()

    Parameters
    ----------
    Hc : np.array
        Centered coupling matrix (symmetric, doubly stochastic)
    W : sparse matrix
        Sparse edge matrix (symmetric)
    echo : boolean
        True to include the echo cancellation term
    rho_W : float
        the spectral radius of W as optional input to speed up the calculations
    """
    if np.std(Hc) < SPECTRAL_TOLERANCE:
        return 0
    else:
        if rho_W is None:
            rho_W = approximate_spectral_radius(csr_matrix(W, dtype="f"))  # needs to transform from int
        rho_H = approximate_spectral_radius(np.array(Hc, dtype="f"))  # same here
        eps = 1.0 / rho_W / rho_H

        # if echo is used, then the above eps value is used as starting point
        if echo:
            Hc2 = Hc.dot(Hc)
            D = degree_matrix(W)

            # function for which we need to determine the root: spectral radius minus 1
            def radius(eps):
                return approximate_spectral_radius(
                    eps * kron(Hc, W) - eps ** 2 * kron(Hc2, D)) - 1

            # http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.newton.html#scipy.optimize.newton
            eps2 = newton(radius, eps, tol=1e-04, maxiter=100)
            eps = eps2

        return eps
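Why eps = 1/(rho_W * rho_H) marks the boundary in the echo-free case: the eigenvalues of a Kronecker product are the pairwise products of the factors' eigenvalues, so rho(eps * kron(Hc, W)) = eps * rho(Hc) * rho(W). A small check with toy matrices (hypothetical values, dense for simplicity):

import numpy as np

rho = lambda M: np.abs(np.linalg.eigvals(M)).max()

Hc = np.array([[0.5, -0.5], [-0.5, 0.5]])   # toy centered coupling matrix
W = np.array([[0.0, 1.0], [1.0, 0.0]])      # toy symmetric edge matrix
eps = 1.0 / (rho(W) * rho(Hc))
print(rho(eps * np.kron(Hc, W)))            # 1.0 up to rounding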
Example #11
def rho_block_D_inv_A(A, Dinv):
    """
    Return the (approx.) spectral radius of block D^-1 * A

    Parameters
    ----------
    A : {sparse-matrix}
        size NxN
    Dinv : {array}
        Inverse of diagonal blocks of A
        size (N/blocksize, blocksize, blocksize)

    Returns
    -------
    approximate spectral radius of (Dinv A)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_block_D_inv_A
    >>> from pyamg.util.utils import get_block_diag
    >>> A = poisson((10,10), format='csr')
    >>> Dinv = get_block_diag(A, blocksize=4, inv_flag=True)

    """

    if not hasattr(A, 'rho_block_D_inv'):
        from scipy.sparse.linalg import LinearOperator

        blocksize = Dinv.shape[1]
        if Dinv.shape[1] != Dinv.shape[2]:
            raise ValueError('Dinv has incorrect dimensions')
        elif Dinv.shape[0] != int(A.shape[0] / blocksize):
            raise ValueError('Dinv and A have incompatible dimensions')

        Dinv = sp.sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)

        # Don't explicitly form Dinv*A
        def matvec(x):
            return Dinv * (A * x)

        D_inv_A = LinearOperator(A.shape, matvec, dtype=A.dtype)

        A.rho_block_D_inv = approximate_spectral_radius(D_inv_A)

    return A.rho_block_D_inv
Example #12
def rho_block_D_inv_A(A, Dinv):
    """
    Return the (approx.) spectral radius of block D^-1 * A

    Parameters
    ----------
    A : {sparse-matrix}
        size NxN
    Dinv : {array}
        Inverse of diagonal blocks of A
        size (N/blocksize, blocksize, blocksize)

    Returns
    -------
    approximate spectral radius of (Dinv A)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_block_D_inv_A
    >>> from pyamg.util.utils import get_block_diag
    >>> A = poisson((10,10), format='csr')
    >>> Dinv = get_block_diag(A, blocksize=4, inv_flag=True)

    """

    if not hasattr(A, 'rho_block_D_inv'):
        from scipy.sparse.linalg import LinearOperator

        blocksize = Dinv.shape[1]
        if Dinv.shape[1] != Dinv.shape[2]:
            raise ValueError('Dinv has incorrect dimensions')
        elif Dinv.shape[0] != int(A.shape[0]/blocksize):
            raise ValueError('Dinv and A have incompatible dimensions')

        Dinv = sp.sparse.bsr_matrix((Dinv,
                                    np.arange(Dinv.shape[0]),
                                    np.arange(Dinv.shape[0]+1)),
                                    shape=A.shape)

        # Don't explicitly form Dinv*A
        def matvec(x):
            return Dinv*(A*x)
        D_inv_A = LinearOperator(A.shape, matvec, dtype=A.dtype)

        A.rho_block_D_inv = approximate_spectral_radius(D_inv_A)

    return A.rho_block_D_inv
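The LinearOperator trick above generalizes: the spectral radius of a product can be estimated from matrix-vector products alone, without ever materializing the product. A standalone, hedged sketch, using scipy's eigs in place of pyamg's estimator:

import numpy as np
from scipy.sparse import random as sprandom
from scipy.sparse.linalg import LinearOperator, eigs

A = sprandom(60, 60, density=0.1, format='csr', random_state=0)
B = sprandom(60, 60, density=0.1, format='csr', random_state=1)

# M acts like B @ A, but the product matrix is never formed.
M = LinearOperator(A.shape, matvec=lambda x: B @ (A @ x), dtype=A.dtype)
rho = abs(eigs(M, k=1, which='LM', return_eigenvectors=False)[0])
print(rho)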
Example #13
def relax(A, x):
    fn, kwargs = unpack_arg(prepostsmoother)
    if fn == 'gauss_seidel':
        gauss_seidel(A, x, np.zeros_like(x),
                     iterations=candidate_iters, sweep='symmetric')
    elif fn == 'gauss_seidel_nr':
        gauss_seidel_nr(A, x, np.zeros_like(x),
                        iterations=candidate_iters, sweep='symmetric')
    elif fn == 'gauss_seidel_ne':
        gauss_seidel_ne(A, x, np.zeros_like(x),
                        iterations=candidate_iters, sweep='symmetric')
    elif fn == 'jacobi':
        jacobi(A, x, np.zeros_like(x), iterations=1,
               omega=1.0 / rho_D_inv_A(A))
    elif fn == 'richardson':
        polynomial(A, x, np.zeros_like(x), iterations=1,
                   coefficients=[1.0/approximate_spectral_radius(A)])
    elif fn == 'gmres':
        x[:] = (gmres(A, np.zeros_like(x), x0=x,
                      maxiter=candidate_iters)[0]).reshape(x.shape)
    else:
        raise TypeError('Unrecognized smoother')
Example #14
def jacobi_prolongation_smoother(S, T, C, B, omega=4.0/3.0, degree=1, filter=False, weighting='diagonal'):
    """Jacobi prolongation smoother
   
    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    C : {csr_matrix, bsr_matrix}
        Strength-of-connection matrix
    B : {array}
        Near nullspace modes for the coarse grid such that T*B 
        exactly reproduces the fine grid near nullspace modes
    omega : {scalar}
        Damping parameter
    filter : {boolean}
        If true, filter S before smoothing T.  This option can greatly control
        complexity.
    weighting : {string}
        'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
          Avoids any potential under-damping due to inaccurate spectral radius
          estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A  
        'diagonal': Classic Jacobi D = diagonal(A)

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
        where K = diag(S)^-1 * S and rho(K) is an approximation to the 
        spectral radius of K.

    Notes
    -----
    If weighting is not 'local', then results using Jacobi prolongation
    smoother are not precisely reproducible due to a random initial guess used
    for the spectral radius approximation.  For precise reproducibility, 
    set numpy.random.seed(..) to the same value before each test. 
    
    Examples
    --------
    >>> from pyamg.aggregation import jacobi_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy
    >>> data = numpy.ones((6,))
    >>> row = numpy.arange(0,6)
    >>> col = numpy.kron([0,1],numpy.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = jacobi_prolongation_smoother(A,T,A,numpy.ones((2,1)))
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    # preprocess weighting
    if weighting == 'block':
        if isspmatrix_csr(S):
            weighting = 'diagonal'
        elif isspmatrix_bsr(S):
            if S.blocksize[0] == 1:
                weighting = 'diagonal'
    
    if filter:
        ##
        # Implement filtered prolongation smoothing for the general case by
        # utilizing satisfy constraints

        if isspmatrix_bsr(S):
            numPDEs = S.blocksize[0]
        else:
            numPDEs = 1

        # Create a filtered S with entries dropped that aren't in C
        C = UnAmal(C, numPDEs, numPDEs)
        S = S.multiply(C)
        S.eliminate_zeros()

    if weighting == 'diagonal':
        # Use diagonal of S
        D_inv = get_diagonal(S, inv=True)
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
    elif weighting == 'block':
        # Use block diagonal of S
        D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
        D_inv = bsr_matrix( (D_inv, numpy.arange(D_inv.shape[0]), \
                         numpy.arange(D_inv.shape[0]+1)), shape = S.shape)
        D_inv_S = D_inv*S
        D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
    elif weighting == 'local':
        # Use the Gershgorin estimate as each row's weight, instead of a global
        # spectral radius estimate
        D = numpy.abs(S)*numpy.ones((S.shape[0],1), dtype=S.dtype)
        D_inv = numpy.zeros_like(D)
        D_inv[D != 0] = 1.0 / numpy.abs(D[D != 0])

        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = omega*D_inv_S
    else:
        raise ValueError('Incorrect weighting option')

    
    if filter: 
        ##
        # Carry out Jacobi, but after calculating the prolongator update, U,
        # apply satisfy constraints so that U*B = 0
        P = T
        for i in range(degree):
            U =  (D_inv_S*P).tobsr(blocksize=P.blocksize)
            
            ##
            # Enforce U*B = 0 
            # (1) Construct array of inv(Bi'Bi), where Bi is B restricted to row
            # i's sparsity pattern in Sparsity Pattern. This array is used
            # multiple times in Satisfy_Constraints(...).
            BtBinv = compute_BtBinv(B, U)
            # (2) Apply satisfy constraints
            Satisfy_Constraints(U, B, BtBinv)
            
            ##
            # Update P
            P = P - U

    else:
        ##
        # Carry out Jacobi as normal
        P = T
        for i in range(degree):
            P = P - (D_inv_S*P)

    return P
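The 'local' weighting above relies on the Gershgorin bound rho(S) <= max_i sum_j |S_ij|, so scaling each row by the inverse of its absolute row sum cannot under-damp. A small, hedged check of the bound:

import numpy as np

S = np.array([[ 4.0, -1.0,  0.0],
              [-1.0,  4.0, -1.0],
              [ 0.0, -1.0,  4.0]])
row_sums = np.abs(S) @ np.ones(S.shape[0])   # Gershgorin row sums
rho = np.abs(np.linalg.eigvals(S)).max()     # about 5.41 here
print(rho <= row_sums.max())                 # True: the bound holds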
Example #15
def richardson_prolongation_smoother(S, T, omega=4.0/3.0, degree=1):
    """Richardson prolongation smoother
   
    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A or the
        "filtered matrix" obtained from A by lumping weak connections
        onto the diagonal of A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    omega : {scalar}
        Damping parameter

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(S) S) * T
        where rho(S) is an approximation to the spectral radius of S.
    
    Notes
    -----
    Results using Richardson prolongation smoother are not precisely
    reproducible due to a random initial guess used for the spectral radius
    approximation.  For precise reproducibility, set numpy.random.seed(..) to
    the same value before each test. 
    

    Examples
    --------
    >>> from pyamg.aggregation import richardson_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy
    >>> data = numpy.ones((6,))
    >>> row = numpy.arange(0,6)
    >>> col = numpy.kron([0,1],numpy.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = richardson_prolongation_smoother(A,T)
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    weight = omega/approximate_spectral_radius(S)

    P = T
    for i in range(degree):
        P = P - weight*(S*P)

    return P
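The loop above applies (I - weight*S) to T once per degree, i.e. P = (I - weight*S)^degree * T. A quick dense sanity check of that identity:

import numpy as np

S = np.array([[2.0, -1.0], [-1.0, 2.0]])
T = np.array([[1.0], [0.0]])
weight, degree = 0.5, 2

P = T.copy()
for _ in range(degree):
    P = P - weight * (S @ P)

M = np.linalg.matrix_power(np.eye(2) - weight * S, degree)
print(np.allclose(P, M @ T))    # True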
Example #16
def jacobi_prolongation_smoother(S,
                                 T,
                                 C,
                                 B,
                                 omega=4.0 / 3.0,
                                 degree=1,
                                 filter=False,
                                 weighting='diagonal',
                                 cost=[0.0]):
    """Jacobi prolongation smoother

    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    C : {csr_matrix, bsr_matrix}
        Strength-of-connection matrix
    B : {array}
        Near nullspace modes for the coarse grid such that T*B
        exactly reproduces the fine grid near nullspace modes
    omega : {scalar}
        Damping parameter
    filter : {boolean}
        If true, filter S before smoothing T.  This option can greatly control
        complexity.
    weighting : {string}
        'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
          Avoids any potential under-damping due to inaccurate spectral radius
          estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A
        'diagonal': Classic Jacobi D = diagonal(A)

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
        where K = diag(S)^-1 * S and rho(K) is an approximation to the
        spectral radius of K.

    Notes
    -----
    If weighting is not 'local', then results using Jacobi prolongation
    smoother are not precisely reproducible due to a random initial guess used
    for the spectral radius approximation.  For precise reproducibility,
    set numpy.random.seed(..) to the same value before each test.

    Examples
    --------
    >>> from pyamg.aggregation import jacobi_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = jacobi_prolongation_smoother(A,T,A,np.ones((2,1)))
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    # preprocess weighting
    if weighting == 'block':
        if sparse.isspmatrix_csr(S):
            weighting = 'diagonal'
        elif sparse.isspmatrix_bsr(S):
            if S.blocksize[0] == 1:
                weighting = 'diagonal'

    if filter:
        # Implement filtered prolongation smoothing for the general case by
        # utilizing satisfy constraints

        if sparse.isspmatrix_bsr(S):
            numPDEs = S.blocksize[0]
        else:
            numPDEs = 1

        # Create a filtered S with entries dropped that aren't in C
        C = UnAmal(C, numPDEs, numPDEs)
        S = S.multiply(C)
        S.eliminate_zeros()
        cost[0] += 1.0

    if weighting == 'diagonal':
        # Use diagonal of S
        D_inv = get_diagonal(S, inv=True)
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
        # 15 WU to find spectral radius, 2 to scale D_inv_S twice
        cost[0] += 17
    elif weighting == 'block':
        # Use block diagonal of S
        D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
        D_inv = sparse.bsr_matrix(
            (D_inv, np.arange(D_inv.shape[0]), np.arange(D_inv.shape[0] + 1)),
            shape=S.shape)
        D_inv_S = D_inv * S
        # 15 WU to find spectral radius, 2 to scale D_inv_S twice
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
        cost[0] += 17
    elif weighting == 'local':
        # Use the Gershgorin estimate as each row's weight, instead of a global
        # spectral radius estimate
        D = np.abs(S) * np.ones((S.shape[0], 1), dtype=S.dtype)
        D_inv = np.zeros_like(D)
        D_inv[D != 0] = 1.0 / np.abs(D[D != 0])

        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = omega * D_inv_S
        cost[0] += 3
    else:
        raise ValueError('Incorrect weighting option')

    if filter:
        # Carry out Jacobi, but after calculating the prolongator update, U,
        # apply satisfy constraints so that U*B = 0
        P = T
        for i in range(degree):
            if sparse.isspmatrix_bsr(P):
                U = (D_inv_S * P).tobsr(blocksize=P.blocksize)
            else:
                U = D_inv_S * P

            cost[0] += P.nnz / float(S.nnz)

            # (1) Enforce U*B = 0. Construct array of inv(Bi'Bi), where Bi is B
            # restricted to row i's sparsity pattern in Sparsity Pattern. This
            # array is used multiple times in Satisfy_Constraints(...).
            temp_cost = [0.0]
            BtBinv = compute_BtBinv(B, U, cost=temp_cost)
            cost[0] += temp_cost[0] / float(S.nnz)

            # (2) Apply satisfy constraints
            temp_cost = [0.0]
            Satisfy_Constraints(U, B, BtBinv, cost=temp_cost)
            cost[0] += temp_cost[0] / float(S.nnz)

            # Update P
            P = P - U
            cost[0] += max(P.nnz, U.nnz) / float(S.nnz)
    else:
        # Carry out Jacobi as normal
        P = T
        for i in range(degree):
            P = P - (D_inv_S * P)
            cost[0] += P.nnz / float(S.nnz)

    return P
Example #17
def richardson_prolongation_smoother(S,
                                     T,
                                     omega=4.0 / 3.0,
                                     degree=1,
                                     cost=[0.0]):
    """Richardson prolongation smoother

    Parameters
    ----------
    S : {csr_matrix, bsr_matrix}
        Sparse NxN matrix used for smoothing.  Typically, A or the
        "filtered matrix" obtained from A by lumping weak connections
        onto the diagonal of A.
    T : {csr_matrix, bsr_matrix}
        Tentative prolongator
    omega : {scalar}
        Damping parameter

    Returns
    -------
    P : {csr_matrix, bsr_matrix}
        Smoothed (final) prolongator defined by P = (I - omega/rho(S) S) * T
        where rho(S) is an approximation to the spectral radius of S.

    Notes
    -----
    Results using Richardson prolongation smoother are not precisely
    reproducible due to a random initial guess used for the spectral radius
    approximation.  For precise reproducibility, set numpy.random.seed(..) to
    the same value before each test.


    Examples
    --------
    >>> from pyamg.aggregation import richardson_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> T.todense()
    matrix([[ 1.,  0.],
            [ 1.,  0.],
            [ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  1.],
            [ 0.,  1.]])
    >>> A = poisson((6,),format='csr')
    >>> P = richardson_prolongation_smoother(A,T)
    >>> P.todense()
    matrix([[ 0.64930164,  0.        ],
            [ 1.        ,  0.        ],
            [ 0.64930164,  0.35069836],
            [ 0.35069836,  0.64930164],
            [ 0.        ,  1.        ],
            [ 0.        ,  0.64930164]])

    """

    # Default 15 Lanczos iterations to find spectral radius
    weight = omega / approximate_spectral_radius(S)
    cost[0] += 15

    P = T
    for i in range(degree):
        P = P - weight * (S * P)
        cost[0] += float(P.nnz) / S.nnz

    return P
Example #18
def reference_evolution_soc(A,
                            B,
                            epsilon=4.0,
                            k=2,
                            proj_type="l2",
                            weighting='diagonal'):
    """
    All python reference implementation for Evolution Strength of Connection

    --> If doing imaginary test, both A and B should be imaginary type upon
    entry

    --> This does the "unsymmetrized" version of the ode measure
    
    --> This supports 'local' and 'diagonal' weighting
    """

    # number of PDEs per point is defined implicitly by block size
    csrflag = isspmatrix_csr(A)
    if csrflag:
        numPDEs = 1
    else:
        numPDEs = A.blocksize[0]
        A = A.tocsr()

    # Preliminaries
    near_zero = np.finfo(float).eps
    sqrt_near_zero = np.sqrt(np.sqrt(near_zero))
    Bmat = np.mat(B)
    A.eliminate_zeros()
    A.sort_indices()
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get the scaled A for ODE time-stepping
    D = A.diagonal()  # This D is used later
    if weighting == 'diagonal':
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        rho_DinvA = approximate_spectral_radius(Dinv_A)
    elif weighting == 'local':
        Dsum = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(Dsum)
        Dinv[Dsum != 0] = 1.0 / np.abs(Dsum[Dsum != 0])
        Dinv[Dsum == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        rho_DinvA = 1.0

    # Calculate (Atilde^k) naively
    S = (scipy.sparse.eye(dimen, dimen, format="csr") -
         (1.0 / rho_DinvA) * Dinv_A)
    Atilde = scipy.sparse.eye(dimen, dimen, format="csr")
    for i in range(k):
        Atilde = S * Atilde

    # Strength Info should be row-based, so transpose Atilde
    Atilde = Atilde.T.tocsr()

    # Construct and apply a sparsity mask for Atilde that restricts Atilde^T to
    # the nonzero pattern of A, with the added constraint that row i of
    # Atilde^T retains only the nonzeros that are also in the same PDE as i.

    mask = A.copy()

    # Only consider strength at dofs from your PDE.  Use mask to enforce this
    # by zeroing out all entries in Atilde that aren't from your PDE.
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # Apply mask to Atilde, zeros in mask have already been eliminated at start
    # of routine.
    mask.data[:] = 1.0
    Atilde = Atilde.multiply(mask)
    Atilde.eliminate_zeros()
    Atilde.sort_indices()
    del mask

    # Calculate strength based on constrained min problem of
    LHS = np.mat(np.zeros((NullDim + 1, NullDim + 1)), dtype=A.dtype)
    RHS = np.mat(np.zeros((NullDim + 1, 1)), dtype=A.dtype)

    # Choose tolerance for dropping "numerically zero" values later
    t = Atilde.dtype.char
    eps = np.finfo(float).eps
    feps = np.finfo(np.single).eps
    geps = np.finfo(np.longdouble).eps
    _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
    tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

    for i in range(dimen):

        # Get rowptrs and col indices from Atilde
        rowstart = Atilde.indptr[i]
        rowend = Atilde.indptr[i + 1]
        length = rowend - rowstart
        colindx = Atilde.indices[rowstart:rowend]

        # Local diagonal of A is used for scale invariant min problem
        D_A = np.mat(np.eye(length, dtype=A.dtype))
        if proj_type == "D_A":
            for j in range(length):
                D_A[j, j] = D[colindx[j]]

        # Find row i's position in colindx, matrix must have sorted column
        # indices.
        iInRow = colindx.searchsorted(i)

        if length <= NullDim:
            # Do nothing, because the number of nullspace vectors will
            # be able to perfectly approximate this row of Atilde.
            Atilde.data[rowstart:rowend] = 1.0
        else:
            # Grab out what we want from Atilde and B.  Put into zi, Bi
            zi = np.mat(Atilde.data[rowstart:rowend]).T

            Bi = Bmat[colindx, :]

            # Construct constrained min problem
            LHS[0:NullDim, 0:NullDim] = 2.0 * Bi.H * D_A * Bi
            LHS[0:NullDim, NullDim] = D_A[iInRow, iInRow] * Bi[iInRow, :].H
            LHS[NullDim, 0:NullDim] = Bi[iInRow, :]
            RHS[0:NullDim, 0] = 2.0 * Bi.H * D_A * zi
            RHS[NullDim, 0] = zi[iInRow, 0]

            # Calc Soln to Min Problem
            x = np.mat(pinv(LHS)) * RHS

            # Calculate best constrained approximation to zi with span(Bi), and
            # filter out "numerically" zero values.  This is important because
            # we look only at the sign of values below when calculating angle.
            zihat = Bi * x[:-1]
            tol_i = np.max(np.abs(zihat)) * tol
            zihat.real[np.abs(zihat.real) < tol_i] = 0.0
            if np.iscomplexobj(zihat):
                zihat.imag[np.abs(zihat.imag) < tol_i] = 0.0

            # if angle in the complex plane between individual entries is
            # greater than 90 degrees, then weak.  We can just look at the dot
            # product to determine if angle is greater than 90 degrees.
            angle = np.real(np.ravel(zihat))*np.real(np.ravel(zi)) +\
                np.imag(np.ravel(zihat))*np.imag(np.ravel(zi))
            angle = angle < 0.0
            angle = np.array(angle, dtype=bool)

            # Calculate approximation ratio
            zi = zihat / zi

            # If the ratio is small, then weak connection
            zi[np.abs(zi) <= 1e-4] = 1e100

            # If angle is greater than 90 degrees, then weak connection
            zi[angle] = 1e100

            # Calculate Relative Approximation Error
            zi = np.abs(1.0 - zi)

            # important to make "perfect" connections explicitly nonzero
            zi[zi < sqrt_near_zero] = 1e-4

            # Calculate and apply drop-tol.  Ignore diagonal by making it very
            # large
            zi[iInRow] = 1e5
            drop_tol = np.min(zi) * epsilon
            zi[zi > drop_tol] = 0.0
            Atilde.data[rowstart:rowend] = np.ravel(zi)

    # Clean up, and return Atilde
    Atilde.eliminate_zeros()
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = scipy.sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR we return amalgamated matrix with the minimum
    # nonzero for each block making up the nonzeros of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        # Atilde = csr_matrix((data, row, col), shape=(*,*))
        At = []
        for i in range(Atilde.indices.shape[0]):
            Atmin = Atilde.data[i, :, :][Atilde.data[i, :, :].nonzero()]
            At.append(Atmin.min())

        Atilde = csr_matrix((np.array(At), Atilde.indices, Atilde.indptr),
                            shape=(int(Atilde.shape[0] / numPDEs),
                                   int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data

    # Scale Atilde by the largest magnitude entry in each row
    largest_row_entry = np.zeros((Atilde.shape[0], ), dtype=Atilde.dtype)
    for i in range(Atilde.shape[0]):
        for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
            val = abs(Atilde.data[j])
            if val > largest_row_entry[i]:
                largest_row_entry[i] = val

    largest_row_entry[largest_row_entry != 0] =\
        1.0 / largest_row_entry[largest_row_entry != 0]
    Atilde = Atilde.tocsr()
    Atilde = scale_rows(Atilde, largest_row_entry, copy=True)

    return Atilde
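A micro-example of the drop-tolerance rule used above (illustrative values): each row keeps only the entries within a factor epsilon of its smallest, i.e. strongest, algebraic distance.

import numpy as np

epsilon = 4.0
zi = np.array([0.1, 0.5, 0.05, 1.2])
drop_tol = zi.min() * epsilon    # 0.2
zi[zi > drop_tol] = 0.0          # zero the entries weaker than the tolerance
print(zi)                        # [0.1  0.   0.05 0.  ]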
Example #19
    def test_approximate_spectral_radius(self):
        np.random.seed(3456)
        cases = []

        cases.append(np.array([[-4]]))

        cases.append(np.array([[2, 0], [0, 1]]))
        cases.append(np.array([[-2, 0], [0, 1]]))

        cases.append(np.array([[100, 0, 0], [0, 101, 0], [0, 0, 99]]))

        for i in range(1, 5):
            cases.append(np.random.rand(i, i))

        # method should be almost exact for small matrices
        for A in cases:
            A = A.astype(float)
            Asp = csr_matrix(A)

            [E, V] = linalg.eig(A)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(A), expected_eig)
            assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
            vec = approximate_spectral_radius(A, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)
            vec = approximate_spectral_radius(Asp, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)

        # try symmetric matrices
        for A in cases:
            A = A + A.transpose()
            A = A.astype(float)
            Asp = csr_matrix(A)

            [E, V] = linalg.eig(A)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(A), expected_eig)
            assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
            vec = approximate_spectral_radius(A, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)
            vec = approximate_spectral_radius(Asp, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)

        # test a larger matrix, and various parameter choices
        cases = []
        A1 = gallery.poisson((50, 50), format='csr')
        cases.append((A1, 7.99241331495))
        A2 = gallery.elasticity.linear_elasticity((32, 32), format='bsr')[0]
        cases.append((A2, 536549.922189))
        for A, expected in cases:
            # test that increasing maxiter increases accuracy
            ans1 = approximate_spectral_radius(A, tol=1e-16, maxiter=5,
                                               restart=0)
            del A.rho
            ans2 = approximate_spectral_radius(A, tol=1e-16, maxiter=15,
                                               restart=0)
            del A.rho
            assert_equal(abs(ans2 - expected) < 0.5*abs(ans1 - expected), True)
            # test that increasing restart increases accuracy
            ans1 = approximate_spectral_radius(A, tol=1e-16, maxiter=10,
                                               restart=0)
            del A.rho
            ans2 = approximate_spectral_radius(A, tol=1e-16, maxiter=10,
                                               restart=1)
            del A.rho
            assert_equal(abs(ans2 - expected) < 0.8*abs(ans1 - expected), True)
            # test tol
            ans1 = approximate_spectral_radius(A, tol=0.1, maxiter=15,
                                               restart=5)
            del A.rho
            assert_equal(abs(ans1 - expected)/abs(expected) < 0.1, True)
            ans2 = approximate_spectral_radius(A, tol=0.001, maxiter=15,
                                               restart=5)
            del A.rho
            assert_equal(abs(ans2 - expected)/abs(expected) < 0.001, True)
            assert_equal(abs(ans2 - expected) < 0.1*abs(ans1 - expected), True)
Example #20
    def test_approximate_spectral_radius(self):
        cases = []

        cases.append(np.array([[-4-4.0j]]))
        cases.append(np.array([[-4+8.2j]]))

        cases.append(np.array([[2.0-2.9j, 0], [0, 1.5]]))
        cases.append(np.array([[-2.0-2.4j, 0], [0, 1.21]]))

        cases.append(np.array([[100+1.0j, 0, 0],
                               [0, 101-1.0j, 0],
                               [0, 0, 99+9.9j]]))

        for i in range(1, 6):
            cases.append(np.array(np.random.rand(i, i)+1.0j*np.random.rand(i, i)))

        # method should be almost exact for small matrices
        for A in cases:
            Asp = csr_matrix(A)
            [E, V] = linalg.eig(A)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            # expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(A), expected_eig)
            assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
            vec = approximate_spectral_radius(A, return_vector=True)[1]
            Avec = A.dot(vec)
            Avec = np.ravel(Avec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(Avec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
            vec = approximate_spectral_radius(Asp, return_vector=True)[1]
            Aspvec = Asp * vec
            Aspvec = np.ravel(Aspvec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(Aspvec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)

            AA = A.conj().T.dot(A)
            AAsp = csr_matrix(AA)
            [E, V] = linalg.eig(AA)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            # expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(AA),
                                expected_eig)
            assert_almost_equal(approximate_spectral_radius(AAsp),
                                expected_eig)
            vec = approximate_spectral_radius(AA, return_vector=True)[1]
            AAvec = AA.dot(vec)
            AAvec = np.ravel(AAvec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(AAvec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
            vec = approximate_spectral_radius(AAsp, return_vector=True)[1]
            AAspvec = AAsp * vec
            AAspvec = np.ravel(AAspvec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(AAspvec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
Example #21
def evolution_strength_of_connection(A, B='ones', epsilon=4.0, k=2,
                                     proj_type="l2", block_flag=False,
                                     symmetrize_measure=True):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B='ones', then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        ODE num time steps, step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    block_flag : {boolean}
        If True, use a block D inverse as preconditioner for A during
        weighted-Jacobi

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B == 'ones':
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's and have
    # sorted indices
    if (not sparse.isspmatrix_csr(A)):
        csrflag = False
        numPDEs = A.blocksize[0]
        D = A.diagonal()
        # Calculate Dinv*A
        if block_flag:
            Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
            Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
                                     np.arange(Dinv.shape[0] + 1)),
                                     shape=A.shape)
            Dinv_A = (Dinv * A).tocsr()
        else:
            Dinv = np.zeros_like(D)
            mask = (D != 0.0)
            Dinv[mask] = 1.0 / D[mask]
            Dinv[D == 0] = 1.0
            Dinv_A = scale_rows(A, Dinv, copy=True)
        A = A.tocsr()
    else:
        csrflag = True
        numPDEs = 1
        D = A.diagonal()
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)

    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this will be used to scale the time step
    # size for the ODE
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    #      In order to later access columns, we calculate the transpose in
    #      CSR format so that columns will be accessed efficiently
    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0/rho_DinvA)*Dinv_A)
    Atilde = Atilde.T.tocsr()

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(range(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            Atilde = Atilde*Atilde

        JacobiStep = (I - (1.0/rho_DinvA)*Dinv_A).T.tocsr()
        for i in range(ninc):
            Atilde = Atilde*JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare-1):
            Atilde = Atilde*Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   Its not clear what to do where B == 0.  This is an
        #   an easy programming solution, that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data/data

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(range(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]

        # Use constrained min problem to define strength
        amg_core.evolution_strength_helper(Atilde.data,
                                           Atilde.indptr,
                                           Atilde.indices,
                                           Atilde.shape[0],
                                           np.ravel(np.asarray(B)),
                                           np.ravel(np.asarray(
                                               (D_A * np.conjugate(B)).T)),
                                           np.ravel(np.asarray(BDB)),
                                           BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if symmetrize_measure:
        Atilde = 0.5*(Atilde + Atilde.T)

    if epsilon != np.inf:
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0]*Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks,))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0/Atilde.data

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
Example #22
def global_ritz_process(A, B1, B2=None, weak_tol=15., level=0, verbose=False):
    """
    Helper function that compresses two sets of targets B1 and B2 into one set
    of candidates. This is the Ritz procedure.

    Parameters
    ----------
    A : {sparse matrix}
        SPD matrix used to compress the candidates so that the weak
        approximation property is satisfied.
    B1 : {array}
        n x m1 array of m1 potential candidates
    B2 : {array}
        n x m2 array of m2 potential candidates
    weak_tol : {float}
        The constant in the weak approximation property.

    Returns
    -------
    New set of candidates forming a Euclidean orthogonal and energy
    orthonormal subset of span(B1,B2). The candidates that trivially satisfy
    the weak approximation property are deleted.
    """

    if B2 is not None:
        B = np.hstack((B1, B2.reshape(-1, 1)))
    else:
        B = B1

    # Orthonormalize the vectors.
    [Q, R] = scipy.linalg.qr(B, mode='economic')

    # Formulate and solve the eigenpairs problem returning eigenvalues in
    # ascending order.
    QtAQ = np.dot(Q.conjugate().T, A * Q)  # WAP
    [E, V] = scipy.linalg.eigh(QtAQ)
    # QtAAQ = A*Q
    # QtAAQ = np.dot(QtAAQ.conjugate().T, QtAAQ)   # WAP_{A^2} = SAP
    # [E,V] = scipy.linalg.eigh(QtAAQ)

    # Make sure the eigenvectors are real; the eigenvalues of this
    # Hermitian problem are already real.
    V = np.real(V)

    # Compute Ritz vectors and normalize them in energy. Also, mark vectors
    # that trivially satisfy the weak approximation property.
    V = np.dot(Q, V)
    num_candidates = -1
    entire_const = weak_tol / approximate_spectral_radius(A)
    if verbose:
        print()
        print(tabs(level), "WAP const", entire_const)
    for j in range(V.shape[1]):
        V[:, j] /= np.sqrt(E[j])
        # verify energy norm is 1
        if verbose:
            print(tabs(level), "Vector 1/e", j, 1. / E[j],
                  "ELIMINATED" if 1. / E[j] <= entire_const else "")
        if 1. / E[j] <= entire_const:
            num_candidates = j
            break

    if num_candidates == 0:
        num_candidates = 1
    if num_candidates == -1:
        num_candidates = V.shape[1]

    if verbose:
        # print(tabs(level), "Finished global Ritz process, eliminated",
        #       B.shape[1] - num_candidates, "candidates;",
        #       num_candidates, "candidates remaining")
        print()

    return V[:, :num_candidates]
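
A hedged usage sketch for global_ritz_process: compress a few random candidates against a small SPD matrix and check that the surviving columns are energy-orthonormal. The pyamg gallery import is an assumption; the function itself is the one defined above.

import numpy as np
from pyamg.gallery import poisson

A = poisson((100,), format='csr')      # SPD tridiagonal test matrix
B1 = np.random.rand(A.shape[0], 3)     # three rough candidate vectors
V = global_ritz_process(A, B1, weak_tol=15.)

# Surviving columns are energy-orthonormal: V^T A V is ~identity
print(np.round(V.T @ (A @ V), 8))
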
Example #23
0
def energy_based_strength_of_connection(A, theta=0.0, k=2, cost=[0]):
    """
    Compute a strength of connection matrix using an energy-based measure.

    Parameters
    ----------
    A : {sparse-matrix}
        matrix from which to generate strength of connection information
    theta : {float}
        Threshold parameter in [0,1]
    k : {int}
        Number of relaxation steps used to generate strength information

    Returns
    -------
    S : {csr_matrix}
        Matrix graph defining strong connections.  The sparsity pattern
        of S matches that of A.  For BSR matrices, S is a reduced strength
        of connection matrix that describes connections between supernodes.

    Notes
    -----
    This method relaxes with weighted-Jacobi in order to approximate the
    matrix inverse.  A normalized change of energy is then used to define
    point-wise strength of connection values.  Specifically, let v be the
    approximation to the i-th column of the inverse, then

    (S_ij)^2 = <v_j, v_j>_A / <v, v>_A,

    where v_j = v, such that entry j in v has been zeroed out.  As is common,
    larger values imply a stronger connection.

    The current implementation is a very slow pure-Python implementation,
    intended for experimental purposes only.

    References
    ----------
    .. [1] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
       "An Energy-Based AMG Coarsening Strategy",
       Numerical Linear Algebra with Applications,
       vol. 13, pp. 133-148, 2006.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import energy_based_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = energy_based_strength_of_connection(A, 0.0)
    """

    if (theta < 0):
        raise ValueError('expected a nonnegative theta')
    if not sparse.isspmatrix(A):
        raise ValueError('expected sparse matrix')
    if (k < 0):
        raise ValueError('expected a nonnegative number of steps')
    if not isinstance(k, int):
        raise ValueError('expected integer')

    if sparse.isspmatrix_bsr(A):
        bsr_flag = True
        numPDEs = A.blocksize[0]
        if A.blocksize[0] != A.blocksize[1]:
            raise ValueError('expected square blocks in BSR matrix A')
    else:
        bsr_flag = False

    # Convert A to csc and Atilde to csr
    if sparse.isspmatrix_csr(A):
        Atilde = A.copy()
        A = A.tocsc()
    else:
        A = A.tocsc()
        Atilde = A.copy()
        Atilde = Atilde.tocsr()

    # Calculate the weighted-Jacobi parameter
    from pyamg.util.linalg import approximate_spectral_radius
    D = A.diagonal()
    Dinv = 1.0 / D
    Dinv[D == 0] = 0.0
    Dinv = sparse.csc_matrix(
        (Dinv, (np.arange(A.shape[0]), np.arange(A.shape[1]))), shape=A.shape)
    DinvA = Dinv * A
    omega = 1.0 / approximate_spectral_radius(DinvA)
    del DinvA

    # Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
    S = sparse.csc_matrix(A.shape, dtype=A.dtype)  # empty matrix
    I = sparse.eye(A.shape[0], A.shape[1], format='csc')
    for i in range(k + 1):
        S = S + omega * (Dinv * (I - A * S))

    # Calculate the strength entries in S column-wise, but only strength
    # values at the sparsity pattern of A
    for i in range(Atilde.shape[0]):
        v = np.mat(S[:, i].todense())
        Av = np.mat(A * v)
        denom = np.sqrt(np.conjugate(v).T * Av)
        # replace entries in row i with strength values
        for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
            col = Atilde.indices[j]
            vj = v[col].copy()
            v[col] = 0.0
            #   =  (||v_j||_A - ||v||_A) / ||v||_A
            val = np.sqrt(np.conjugate(v).T * A * v) / denom - 1.0

            # Negative values generally imply a weak connection
            if val > -0.01:
                Atilde.data[j] = abs(val)
            else:
                Atilde.data[j] = 0.0

            v[col] = vj

    # Apply drop tolerance
    Atilde = classical_strength_of_connection_abs(Atilde, theta=theta)
    Atilde.eliminate_zeros()

    # Put ones on the diagonal
    Atilde = Atilde + I.tocsr()
    Atilde.sort_indices()

    # Amalgamate Atilde for the BSR case, using ones for all strong connections
    if bsr_flag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
        nblocks = Atilde.indices.shape[0]
        uone = np.ones((nblocks, ))
        Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
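
The inverse approximation at the heart of the routine is the weighted-Jacobi recurrence S <- S + omega * Dinv * (I - A*S), whose fixed point is A^{-1}. A minimal dense sketch (illustrative only, not the routine's sparse version) showing the iterates converge for an SPD matrix with omega = 1/rho(Dinv*A):

import numpy as np

A = np.array([[4.0, -1.0, 0.0],
              [-1.0, 4.0, -1.0],
              [0.0, -1.0, 4.0]])
Dinv = np.diag(1.0 / np.diag(A))
omega = 1.0 / np.max(np.abs(np.linalg.eigvals(Dinv @ A)))

S = np.zeros_like(A)
for _ in range(50):
    # fixed point: S = A^{-1}, since Dinv*(I - A*S) vanishes there
    S = S + omega * (Dinv @ (np.eye(3) - A @ S))

print(np.max(np.abs(S - np.linalg.inv(A))))  # ~1e-14
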
Example #24
0
    def test_approximate_spectral_radius(self):
        cases = []

        cases.append(np.array([[-4-4.0j]]))
        cases.append(np.array([[-4+8.2j]]))

        cases.append(np.array([[2.0-2.9j, 0], [0, 1.5]]))
        cases.append(np.array([[-2.0-2.4j, 0], [0, 1.21]]))

        cases.append(np.array([[100+1.0j, 0, 0],
                             [0, 101-1.0j, 0],
                             [0, 0, 99+9.9j]]))

        for i in range(1, 6):
            cases.append(np.array(np.random.rand(i, i)+1.0j*np.random.rand(i, i)))

        # method should be almost exact for small matrices
        for A in cases:
            Asp = csr_matrix(A)
            [E, V] = linalg.eig(A)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            # expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(A), expected_eig)
            assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
            vec = approximate_spectral_radius(A, return_vector=True)[1]
            Avec = A.dot(vec)
            Avec = np.ravel(Avec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(Avec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
            vec = approximate_spectral_radius(Asp, return_vector=True)[1]
            Aspvec = Asp * vec
            Aspvec = np.ravel(Aspvec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(Aspvec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)

            AA = A.conj().T.dot(A)
            AAsp = csr_matrix(AA)
            [E, V] = linalg.eig(AA)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            # expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(AA),
                                expected_eig)
            assert_almost_equal(approximate_spectral_radius(AAsp),
                                expected_eig)
            vec = approximate_spectral_radius(AA, return_vector=True)[1]
            AAvec = AA.dot(vec)
            AAvec = np.ravel(AAvec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(AAvec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
            vec = approximate_spectral_radius(AAsp, return_vector=True)[1]
            AAspvec = AAsp * vec
            AAspvec = np.ravel(AAspvec)
            vec = np.ravel(vec)
            rayleigh = abs(np.dot(AAspvec, vec) / np.dot(vec, vec))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
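
The Rayleigh-quotient check that this test applies to the returned vector is a generic way to validate an approximate dominant eigenpair. A small self-contained sketch of the same check around plain power iteration:

import numpy as np

A = np.array([[2.0, 1.0],
              [1.0, 3.0]])
v = np.random.rand(2)
for _ in range(100):        # power iteration for the dominant eigenvector
    v = A @ v
    v /= np.linalg.norm(v)

rayleigh = abs(v @ (A @ v)) / (v @ v)
print(rayleigh)                              # ~3.618
print(np.max(np.abs(np.linalg.eigvals(A))))  # matches
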
Example #25
0
def reference_evolution_soc(A, B, epsilon=4.0, k=2, proj_type="l2"):
    """
    All python reference implementation for Evolution Strength of Connection

    --> If doing a complex-valued test, both A and B should be complex
    upon entry

    --> This does the "unsymmetrized" version of the ODE measure
    """

    # number of PDEs per point is defined implicitly by block size
    csrflag = isspmatrix_csr(A)
    if csrflag:
        numPDEs = 1
    else:
        numPDEs = A.blocksize[0]
        A = A.tocsr()

    # Preliminaries
    near_zero = np.finfo(float).eps
    sqrt_near_zero = np.sqrt(np.sqrt(near_zero))
    Bmat = np.mat(B)
    A.eliminate_zeros()
    A.sort_indices()
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this is the time step size for the ODE
    D = A.diagonal()
    Dinv = np.zeros_like(D)
    mask = (D != 0.0)
    Dinv[mask] = 1.0 / D[mask]
    Dinv[D == 0] = 1.0
    Dinv_A = scale_rows(A, Dinv, copy=True)
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate (Atilde^k) naively
    S = (scipy.sparse.eye(dimen, dimen, format="csr") - (1.0/rho_DinvA)*Dinv_A)
    Atilde = scipy.sparse.eye(dimen, dimen, format="csr")
    for i in range(k):
        Atilde = S*Atilde

    # Strength Info should be row-based, so transpose Atilde
    Atilde = Atilde.T.tocsr()

    # Construct and apply a sparsity mask for Atilde that restricts Atilde^T to
    # the nonzero pattern of A, with the added constraint that row i of
    # Atilde^T retains only the nonzeros that are also in the same PDE as i.

    mask = A.copy()

    # Only consider strength at dofs from your PDE.  Use mask to enforce this
    # by zeroing out all entries in Atilde that aren't from your PDE.
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # Apply mask to Atilde, zeros in mask have already been eliminated at start
    # of routine.
    mask.data[:] = 1.0
    Atilde = Atilde.multiply(mask)
    Atilde.eliminate_zeros()
    Atilde.sort_indices()
    del mask

    # Calculate strength based on constrained min problem of
    LHS = np.mat(np.zeros((NullDim+1, NullDim+1)), dtype=A.dtype)
    RHS = np.mat(np.zeros((NullDim+1, 1)), dtype=A.dtype)

    # Choose tolerance for dropping "numerically zero" values later
    t = Atilde.dtype.char
    eps = np.finfo(np.double).eps
    feps = np.finfo(np.single).eps
    geps = np.finfo(np.longdouble).eps
    _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
    tol = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]

    for i in range(dimen):

        # Get rowptrs and col indices from Atilde
        rowstart = Atilde.indptr[i]
        rowend = Atilde.indptr[i+1]
        length = rowend - rowstart
        colindx = Atilde.indices[rowstart:rowend]

        # Local diagonal of A is used for scale invariant min problem
        D_A = np.mat(np.eye(length, dtype=A.dtype))
        if proj_type == "D_A":
            for j in range(length):
                D_A[j, j] = D[colindx[j]]

        # Find row i's position in colindx, matrix must have sorted column
        # indices.
        iInRow = colindx.searchsorted(i)

        if length <= NullDim:
            # Do nothing, because the number of nullspace vectors will
            # be able to perfectly approximate this row of Atilde.
            Atilde.data[rowstart:rowend] = 1.0
        else:
            # Grab out what we want from Atilde and B.  Put into zi, Bi
            zi = np.mat(Atilde.data[rowstart:rowend]).T

            Bi = Bmat[colindx, :]

            # Construct constrained min problem
            LHS[0:NullDim, 0:NullDim] = 2.0*Bi.H*D_A*Bi
            LHS[0:NullDim, NullDim] = D_A[iInRow, iInRow]*Bi[iInRow, :].H
            LHS[NullDim, 0:NullDim] = Bi[iInRow, :]
            RHS[0:NullDim, 0] = 2.0*Bi.H*D_A*zi
            RHS[NullDim, 0] = zi[iInRow, 0]

            # Calc Soln to Min Problem
            x = np.mat(pinv(LHS))*RHS

            # Calculate best constrained approximation to zi with span(Bi), and
            # filter out "numerically" zero values.  This is important because
            # we look only at the sign of values below when calculating angle.
            zihat = Bi*x[:-1]
            tol_i = np.max(np.abs(zihat))*tol
            zihat.real[np.abs(zihat.real) < tol_i] = 0.0
            if np.iscomplexobj(zihat):
                zihat.imag[np.abs(zihat.imag) < tol_i] = 0.0

            # if angle in the complex plane between individual entries is
            # greater than 90 degrees, then weak.  We can just look at the dot
            # product to determine if angle is greater than 90 degrees.
            angle = np.real(np.ravel(zihat))*np.real(np.ravel(zi)) +\
                np.imag(np.ravel(zihat))*np.imag(np.ravel(zi))
            angle = angle < 0.0
            angle = np.array(angle, dtype=bool)

            # Calculate approximation ratio
            zi = zihat/zi

            # If the ratio is small, then weak connection
            zi[np.abs(zi) <= 1e-4] = 1e100

            # If angle is greater than 90 degrees, then weak connection
            zi[angle] = 1e100

            # Calculate Relative Approximation Error
            zi = np.abs(1.0 - zi)

            # important to make "perfect" connections explicitly nonzero
            zi[zi < sqrt_near_zero] = 1e-4

            # Calculate and apply drop-tol.  Ignore diagonal by making it very
            # large
            zi[iInRow] = 1e5
            drop_tol = np.min(zi)*epsilon
            zi[zi > drop_tol] = 0.0
            Atilde.data[rowstart:rowend] = np.ravel(zi)

    # Clean up, and return Atilde
    Atilde.eliminate_zeros()
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = scipy.sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR we return amalgamated matrix with the minimum
    # nonzero for each block making up the nonzeros of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        # Atilde = csr_matrix((data, row, col), shape=(*,*))
        At = []
        for i in range(Atilde.indices.shape[0]):
            Atmin = Atilde.data[i, :, :][Atilde.data[i, :, :].nonzero()]
            At.append(Atmin.min())

        Atilde = csr_matrix((np.array(At), Atilde.indices, Atilde.indptr),
                            shape=(int(Atilde.shape[0]/numPDEs),
                                   int(Atilde.shape[1]/numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0/Atilde.data

    # Scale Atilde by the largest magnitude entry in each row
    largest_row_entry = np.zeros((Atilde.shape[0],), dtype=Atilde.dtype)
    for i in range(Atilde.shape[0]):
        for j in range(Atilde.indptr[i], Atilde.indptr[i+1]):
            val = abs(Atilde.data[j])
            if val > largest_row_entry[i]:
                largest_row_entry[i] = val

    largest_row_entry[largest_row_entry != 0] =\
        1.0 / largest_row_entry[largest_row_entry != 0]
    Atilde = Atilde.tocsr()
    Atilde = scale_rows(Atilde, largest_row_entry, copy=True)

    return Atilde
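
The core object of the reference implementation is the evolution operator S = I - (1/rho) * Dinv * A applied k times; its columns are relaxed delta functions. A hedged sketch that builds S^k for a small 1D Poisson problem, assuming the same pyamg utilities the function above uses:

import numpy as np
from scipy import sparse
from pyamg.gallery import poisson
from pyamg.util.linalg import approximate_spectral_radius
from pyamg.util.utils import scale_rows

A = poisson((8,), format='csr')
Dinv = 1.0 / A.diagonal()
Dinv_A = scale_rows(A, Dinv, copy=True)
rho = approximate_spectral_radius(Dinv_A)

S = sparse.eye(8, 8, format='csr') - (1.0 / rho) * Dinv_A
Atilde = sparse.eye(8, 8, format='csr')
for _ in range(2):                      # k = 2 time steps
    Atilde = S * Atilde

# Column j of Atilde is the delta function at j after two sweeps;
# its decay away from j is what the strength measure quantifies.
print(np.round(Atilde[:, 3].toarray().ravel(), 3))
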
Example #26
0
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother, smooth,
                        eliminate_local, coarse_solver, work):
    """
    Computes additional candidates and improvements
    following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.  the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K + 1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * int(M / K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = int(B_old.shape[0] / nodesize)
        Bnew = np.zeros((nnodes, nodesize + 1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype.name.startswith('complex'):
        x = x + 1.0j * np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    x = ml.solve(b,
                 x0=x,
                 tol=float(np.finfo(float).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity(
    ) * ml.levels[0].A.nnz * candidate_iters * 2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, it's not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #    eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #    levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(
                levels[i].A, T, **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i + 1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i + 1].T)
        R_bridge = levels[i + 2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i + 1])
        if fn == 'jacobi':
            levels[i + 1].P = jacobi_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i + 1].P = richardson_prolongation_smoother(
                levels[i + 1].A, T_bridge, **kwargs)
        elif fn == 'energy':
            levels[i + 1].P = energy_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge, None,
                (False, {}), **kwargs)
        elif fn is None:
            levels[i + 1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i + 1].R = levels[i + 1].P.T.asformat(levels[i +
                                                                1].P.format)
        elif symmetry == 'hermitian':
            levels[i + 1].R = levels[i + 1].P.H.asformat(levels[i +
                                                                1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i + 1:], coarse_solver=coarse_solver)
        change_smoothers(solver,
                         presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x),
                         x0=x,
                         tol=float(np.finfo(float).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
            candidate_iters*2

        # update values on next level
        levels[i + 1].B = R[:, :-1].copy()
        levels[i + 1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz * candidate_iters * 2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A,
                                 x,
                                 np.zeros_like(x),
                                 indices,
                                 iterations=candidate_iters,
                                 sweep='symmetric')

        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')

        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')

        elif fn == 'jacobi':
            jacobi(lvl.A,
                   x,
                   np.zeros_like(x),
                   iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))

        elif fn == 'richardson':
            polynomial(lvl.A,
                       x,
                       np.zeros_like(x),
                       iterations=1,
                       coefficients=[1.0 / approximate_spectral_radius(lvl.A)])

        elif fn == 'gmres':
            x[:] = (gmres(lvl.A,
                          np.zeros_like(x),
                          x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x / norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
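
The expand_candidates helper above relies on a reshape trick: view B as (nnodes, nodesize, NullDim), pad one always-zero dof per node, and flatten back. A self-contained sketch of just that step:

import numpy as np

nodesize, NullDim = 2, 1
B_old = np.arange(6, dtype=float).reshape(-1, NullDim)  # 3 nodes, 2 dofs each
nnodes = B_old.shape[0] // nodesize

Bnew = np.zeros((nnodes, nodesize + 1, NullDim))
Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
Bnew = Bnew.reshape(-1, NullDim)

print(Bnew.ravel())  # [0. 1. 0. 2. 3. 0. 4. 5. 0.] -- one zero per node
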
Example #27
0
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother,
                        smooth, eliminate_local, coarse_solver, work):
    """Compute additional candidates and improvements following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf

    """
    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.  the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K+1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * int(M / K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = int(B_old.shape[0] / nodesize)
        Bnew = np.zeros((nnodes, nodesize+1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype.name.startswith('complex'):
        x = x + 1.0j*np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    x = ml.solve(b, x0=x, tol=float(np.finfo(float).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity()*ml.levels[0].A.nnz*candidate_iters*2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, it's not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #    eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #    levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(levels[i].A, T,
                                                           **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i+1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i+1].T)
        R_bridge = levels[i+2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i+1])
        if fn == 'jacobi':
            levels[i+1].P = jacobi_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i+1].P = richardson_prolongation_smoother(levels[i+1].A,
                                                             T_bridge,
                                                             **kwargs)
        elif fn == 'energy':
            levels[i+1].P = energy_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, None,
                                                         (False, {}), **kwargs)
        elif fn is None:
            levels[i+1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i+1].R = levels[i+1].P.T.asformat(levels[i+1].P.format)
        elif symmetry == 'hermitian':
            levels[i+1].R = levels[i+1].P.H.asformat(levels[i+1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i+1:], coarse_solver=coarse_solver)
        change_smoothers(solver, presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x), x0=x,
                         tol=float(np.finfo(float).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
            candidate_iters*2

        # update values on next level
        levels[i+1].B = R[:, :-1].copy()
        levels[i+1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz*candidate_iters*2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A, x, np.zeros_like(x), indices,
                                 iterations=candidate_iters, sweep='symmetric')

        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')

        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')

        elif fn == 'jacobi':
            jacobi(lvl.A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))

        elif fn == 'richardson':
            polynomial(lvl.A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(lvl.A)])

        elif fn == 'gmres':
            x[:] = (gmres(lvl.A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x/norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
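
In the final loop of both versions, the new candidate is carried from the second-coarsest level back to the finest by repeated prolongation (x = lvl.P * x) interleaved with cheap relaxation. A schematic sketch of the transfer step alone, assuming a pyamg hierarchy built elsewhere:

import numpy as np
import pyamg
from pyamg.gallery import poisson

ml = pyamg.smoothed_aggregation_solver(poisson((100,), format='csr'))
x = np.random.rand(ml.levels[-1].A.shape[0])   # vector on the coarsest level
for lvl in reversed(ml.levels[:-1]):
    x = lvl.P @ x                              # prolongate one level finer
print(x.shape)                                 # now a fine-level vector
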
Example #28
0
    def test_approximate_spectral_radius(self):
        np.random.seed(3456)
        cases = []

        cases.append(np.array([[-4]]))

        cases.append(np.array([[2, 0], [0, 1]]))
        cases.append(np.array([[-2, 0], [0, 1]]))

        cases.append(np.array([[100, 0, 0], [0, 101, 0], [0, 0, 99]]))

        for i in range(1, 5):
            cases.append(np.random.rand(i, i))

        # method should be almost exact for small matrices
        for A in cases:
            A = A.astype(float)
            Asp = csr_matrix(A)

            [E, V] = linalg.eig(A)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(A), expected_eig)
            assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
            vec = approximate_spectral_radius(A, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)
            vec = approximate_spectral_radius(Asp, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)

        # try symmetric matrices
        for A in cases:
            A = A + A.transpose()
            A = A.astype(float)
            Asp = csr_matrix(A)

            [E, V] = linalg.eig(A)
            E = np.abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(A), expected_eig)
            assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
            vec = approximate_spectral_radius(A, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)
            vec = approximate_spectral_radius(Asp, return_vector=True)[1]
            minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
            diff = minnorm / norm(expected_vec)
            assert_almost_equal(diff, 0.0, decimal=4)

        # test a larger matrix, and various parameter choices
        cases = []
        A1 = gallery.poisson((50, 50), format='csr')
        cases.append((A1, 7.99241331495))
        A2 = gallery.elasticity.linear_elasticity((32, 32), format='bsr')[0]
        cases.append((A2, 536549.922189))
        for A, expected in cases:
            # test that increasing maxiter increases accuracy
            ans1 = approximate_spectral_radius(A, tol=1e-16, maxiter=5,
                                               restart=0)
            del A.rho
            ans2 = approximate_spectral_radius(A, tol=1e-16, maxiter=15,
                                               restart=0)
            del A.rho
            assert_equal(abs(ans2 - expected) < 0.5*abs(ans1 - expected), True)
            # test that increasing restart increases accuracy
            ans1 = approximate_spectral_radius(A, tol=1e-16, maxiter=10,
                                               restart=0)
            del A.rho
            ans2 = approximate_spectral_radius(A, tol=1e-16, maxiter=10,
                                               restart=1)
            del A.rho
            assert_equal(abs(ans2 - expected) < 0.8*abs(ans1 - expected), True)
            # test tol
            ans1 = approximate_spectral_radius(A, tol=0.1, maxiter=15,
                                               restart=5)
            del A.rho
            assert_equal(abs(ans1 - expected)/abs(expected) < 0.1, True)
            ans2 = approximate_spectral_radius(A, tol=0.001, maxiter=15,
                                               restart=5)
            del A.rho
            assert_equal(abs(ans2 - expected)/abs(expected) < 0.001, True)
            assert_equal(abs(ans2 - expected) < 0.1*abs(ans1 - expected), True)
Example #29
0
def radius(eps):
    return approximate_spectral_radius(kron(Hc, W).dot(eps)
                                       - kron(Hc2, D).dot(eps ** 2)) - 1
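
Example #29 is a bare closure; Hc, W, Hc2 and D come from its enclosing scope in the original source. A hedged guess at its use: hand it to a scalar root finder to locate the eps at which the spectral radius crosses 1. The small matrices below are purely illustrative stand-ins, and the radius is approximate, so the root is approximate too.

import numpy as np
from scipy.optimize import brentq
from scipy.sparse import identity, kron
from pyamg.gallery import poisson
from pyamg.util.linalg import approximate_spectral_radius

Hc = identity(2, format='csr')     # illustrative stand-ins, not the
Hc2 = identity(2, format='csr')    # matrices of the original snippet
W = poisson((3,), format='csr')
D = identity(3, format='csr')

def radius(eps):
    return approximate_spectral_radius(kron(Hc, W) * eps
                                       - kron(Hc2, D) * eps ** 2) - 1

eps_star = brentq(radius, 1e-3, 1.0)   # radius changes sign on [1e-3, 1]
print(eps_star, radius(eps_star))      # radius(eps_star) ~ 0
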
Example #30
0
def evolution_strength_of_connection(A,
                                     B=None,
                                     epsilon=4.0,
                                     k=2,
                                     proj_type="l2",
                                     weighting='diagonal',
                                     symmetrize_measure=True,
                                     cost=[0]):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B=None, then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        Number of ODE time steps; step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the D-inverse 
        used to precondition A before "evolving" delta-functions.  The
        local option is the cheapest.

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon >= 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B is None:
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Is matrix A CSR?
    if (not sparse.isspmatrix_csr(A)):
        numPDEs = A.blocksize[0]
        csrflag = False
    else:
        numPDEs = 1
        csrflag = True

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's, have
    # sorted indices and be scaled by D-inverse
    if weighting == 'block':
        Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
        Dinv_A = (Dinv * A).tocsr()
        cost[0] += 1
    elif weighting == 'diagonal':
        D = A.diagonal()
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    elif weighting == 'local':
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    else:
        raise ValueError('Unrecognized weighting for Evolution measure')

    A = A.tocsr()
    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    if weighting == 'diagonal' or weighting == 'block':
        # Get spectral radius of Dinv*A, scales the time step size for the ODE
        rho_DinvA = approximate_spectral_radius(Dinv_A)
        cost[0] += 15  # 15 lanczos iterations to approximate spectral radius
    else:
        # Using local weighting, no need for spectral radius
        rho_DinvA = 1.0

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D = A.diagonal()
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    # We transpose the product, so that we can efficiently access
    # the columns in CSR format.  We want the columns (not rows) because
    # strength is based on the columns of (I - delta_t Dinv A)^k, i.e.,
    # relaxed delta functions

    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = Atilde.T.tocsr()
    cost[0] += 1

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        JacobiStep = csr_matrix(Atilde, copy=True)
        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        for i in range(ninc):
            cost[0] += mat_mat_complexity(Atilde, JacobiStep)
            Atilde = Atilde * JacobiStep

        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()
        cost[0] += Atilde.nnz / float(A.nnz)

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare - 1):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)
        cost[0] += mat_mat_complexity(Atilde, mask, incomplete=True) / float(
            A.nnz)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an
        #   easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling
        cost[0] += Atilde.shape[0] / float(A.nnz)

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))
        cost[0] += 2.0 * Atilde.nnz / float(A.nnz)

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)
        cost[0] += Atilde.nnz / float(A.nnz)
        if np.iscomplexobj(Atilde.data):
            cost[0] += Atilde.nnz / float(A.nnz)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data / data
        cost[0] += Atilde.nnz / float(A.nnz)

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)
        cost[0] += Atilde.nnz / float(A.nnz)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1
                cost[0] += B.shape[0] / float(A.nnz)

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(np.double).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

        # Use constrained min problem to define strength.
        # This does the same as the NullDim=1 case above, but with
        # multiple candidate vectors ("bad guys").
        # Complexity accounts for computing the block inverse, and
        #   hat{z_i} = B_i*x, hat{z_i} .* hat{z_i},
        #   hat{z_i} = hat{z_i} / z_i, and abs(1.0 - hat{z_i}).
        cost[0] += (Atilde.nnz * (3 + NullDim) +
                    (NullDim**3) * dimen) / float(A.nnz)
        amg_core.evolution_strength_helper(
            Atilde.data, Atilde.indptr, Atilde.indices, Atilde.shape[0],
            np.ravel(np.asarray(B)),
            np.ravel(np.asarray((D_A * np.conjugate(B)).T)),
            np.ravel(np.asarray(BDB)), BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        cost[0] += Atilde.nnz / float(A.nnz)
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I
    cost[0] += Atilde.shape[0] / float(A.nnz)

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks, ))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data
    cost[0] += Atilde.nnz / float(A.nnz)

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)
    cost[0] += Atilde.nnz / float(A.nnz)

    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5 * (Atilde + Atilde.T)
        cost[0] += Atilde.nnz / float(A.nnz)

    return Atilde
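
The time-stepping loop above computes Atilde^k with log2(k) squarings plus leftover incremental products. A minimal dense sketch of the same nsquare/ninc split:

import numpy as np

def matrix_power_by_squaring(M, k):
    nsquare = int(np.log2(k))   # squarings: M -> M^2 -> M^4 -> ...
    ninc = k - 2 ** nsquare     # leftover incremental multiplies
    P = M.copy()
    for _ in range(nsquare):
        P = P @ P
    for _ in range(ninc):
        P = P @ M
    return P

M = np.random.rand(4, 4)
print(np.allclose(matrix_power_by_squaring(M, 6),
                  np.linalg.matrix_power(M, 6)))   # True
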
Example #31
0
def energy_based_strength_of_connection(A, theta=0.0, k=2):
    """
    Compute a strength of connection matrix using an energy-based measure.

    Parameters
    ----------
    A : {sparse-matrix}
        matrix from which to generate strength of connection information
    theta : {float}
        Threshold parameter in [0,1]
    k : {int}
        Number of relaxation steps used to generate strength information

    Returns
    -------
    S : {csr_matrix}
        Matrix graph defining strong connections.  The sparsity pattern
        of S matches that of A.  For BSR matrices, S is a reduced strength
        of connection matrix that describes connections between supernodes.

    Notes
    -----
    This method relaxes with weighted-Jacobi in order to approximate the
    matrix inverse.  A normalized change of energy is then used to define
    point-wise strength of connection values.  Specifically, let v be the
    approximation to the i-th column of the inverse, then

    (S_ij)^2 = <v_j, v_j>_A / <v, v>_A,

    where v_j = v, such that entry j in v has been zeroed out.  As is common,
    larger values imply a stronger connection.

    The current implementation is a very slow pure-Python implementation,
    intended for experimental purposes only.

    References
    ----------
    .. [1] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
       "An Energy-Based AMG Coarsening Strategy",
       Numerical Linear Algebra with Applications,
       vol. 13, pp. 133-148, 2006.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import energy_based_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = energy_based_strength_of_connection(A, 0.0)
    """

    if (theta < 0):
        raise ValueError('expected a nonnegative theta')
    if not sparse.isspmatrix(A):
        raise ValueError('expected sparse matrix')
    if (k < 0):
        raise ValueError('expected a nonnegative number of steps')
    if not isinstance(k, int):
        raise ValueError('expected integer')

    if sparse.isspmatrix_bsr(A):
        bsr_flag = True
        numPDEs = A.blocksize[0]
        if A.blocksize[0] != A.blocksize[1]:
            raise ValueError('expected square blocks in BSR matrix A')
    else:
        bsr_flag = False

    # Convert A to csc and Atilde to csr
    if sparse.isspmatrix_csr(A):
        Atilde = A.copy()
        A = A.tocsc()
    else:
        A = A.tocsc()
        Atilde = A.copy()
        Atilde = Atilde.tocsr()

    # Calculate the weighted-Jacobi parameter
    from pyamg.util.linalg import approximate_spectral_radius
    D = A.diagonal()
    Dinv = 1.0 / D
    Dinv[D == 0] = 0.0
    Dinv = sparse.csc_matrix((Dinv, (np.arange(A.shape[0]),
                             np.arange(A.shape[1]))), shape=A.shape)
    DinvA = Dinv*A
    omega = 1.0/approximate_spectral_radius(DinvA)
    del DinvA

    # Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
    S = sparse.csc_matrix(A.shape, dtype=A.dtype)  # empty matrix
    I = sparse.eye(A.shape[0], A.shape[1], format='csc')
    for i in range(k+1):
        S = S + omega*(Dinv*(I - A * S))

    # Calculate the strength entries in S column-wise, but only strength
    # values at the sparsity pattern of A
    for i in range(Atilde.shape[0]):
        v = np.mat(S[:, i].todense())
        Av = np.mat(A * v)
        denom = np.sqrt(np.conjugate(v).T * Av)
        # replace entries in row i with strength values
        for j in range(Atilde.indptr[i], Atilde.indptr[i+1]):
            col = Atilde.indices[j]
            vj = v[col].copy()
            v[col] = 0.0
            #   =  (||v_j||_A - ||v||_A) / ||v||_A
            val = np.sqrt(np.conjugate(v).T * A * v)/denom - 1.0

            # Negative values generally imply a weak connection
            if val > -0.01:
                Atilde.data[j] = abs(val)
            else:
                Atilde.data[j] = 0.0

            v[col] = vj

    # Apply drop tolerance
    Atilde = classical_strength_of_connection(Atilde, theta=theta)
    Atilde.eliminate_zeros()

    # Put ones on the diagonal
    Atilde = Atilde + I.tocsr()
    Atilde.sort_indices()

    # Amalgamate Atilde for the BSR case, using ones for all strong connections
    if bsr_flag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
        nblocks = Atilde.indices.shape[0]
        uone = np.ones((nblocks,))
        Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
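
A short usage sketch for the routine above, assuming the same pyamg context in which it is defined. After the final row scaling, all stored entries are nonnegative and the largest entry in each row is 1:

import numpy as np
from pyamg.gallery import poisson

A = poisson((5, 5), format='csr')
S = energy_based_strength_of_connection(A, theta=0.0, k=2)

print(S.shape == A.shape)                        # pattern matches A
print(float(S.data.min()), float(S.data.max()))  # within [0, 1]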