Example #1
def evolution_strength_of_connection(A,
                                     B=None,
                                     epsilon=4.0,
                                     k=2,
                                     proj_type="l2",
                                     weighting='diagonal',
                                     symmetrize_measure=True,
                                     cost=[0]):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {None, array}
        If B is None, then the near nullspace vector used is all ones.  If B
        is an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        Number of ODE time steps; the step size is assumed to be
        1/rho(Dinv*A)
    proj_type : {'l2','D_A'}
        Norm used in the constrained minimization problem, i.e., it
        defines the projection
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the D-inverse
        used to precondition A before "evolving" delta-functions.  The
        local option is the cheapest.
    symmetrize_measure : {bool}
        If True, symmetrize the resulting measure as 0.5*(S + S.T)
    cost : {list}
        One-element list accumulating an estimate of the work performed,
        in units of the number of nonzeros in A

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import (scale_rows, get_block_diag,
                                  scale_columns, get_diagonal)
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B is None:
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Is matrix A CSR?
    if (not sparse.isspmatrix_csr(A)):
        numPDEs = A.blocksize[0]
        csrflag = False
    else:
        numPDEs = 1
        csrflag = True

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's, have
    # sorted indices and be scaled by D-inverse
    if weighting == 'block':
        Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
        Dinv_A = (Dinv * A).tocsr()
        cost[0] += 1
    elif weighting == 'diagonal':
        D = A.diagonal()
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    elif weighting == 'local':
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    else:
        raise ValueError('Unrecognized weighting for Evolution measure')

    A = A.tocsr()
    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    if weighting == 'diagonal' or weighting == 'block':
        # Get spectral radius of Dinv*A, scales the time step size for the ODE
        rho_DinvA = approximate_spectral_radius(Dinv_A)
        cost[0] += 15  # 15 lanczos iterations to approximate spectral radius
    else:
        # Using local weighting, no need for spectral radius
        rho_DinvA = 1.0

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D = A.diagonal()
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    # We transpose the product, so that we can efficiently access
    # the columns in CSR format.  We want the columns (not rows) because
    # strength is based on the columns of (I - delta_t Dinv A)^k, i.e.,
    # relaxed delta functions

    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare
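    # For example, k = 6 gives nsquare = 2 and ninc = 2: two squarings
    # produce Atilde^4, followed by two incremental multiplies.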

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = Atilde.T.tocsr()
    cost[0] += 1

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        JacobiStep = csr_matrix(Atilde, copy=True)
        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        for i in range(ninc):
            cost[0] += mat_mat_complexity(Atilde, JacobiStep)
            Atilde = Atilde * JacobiStep

        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()
        cost[0] += Atilde.nnz / float(A.nnz)

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare - 1):
            cost[0] += mat_mat_complexity(Atilde, Atilde)
            Atilde = Atilde * Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)
        cost[0] += mat_mat_complexity(Atilde, mask, incomplete=True) / float(
            A.nnz)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It is not clear what to do where B == 0.  Setting those entries
        #   to 1.0 is an easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling
        cost[0] += Atilde.shape[0] / float(A.nnz)

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))
        cost[0] += 2.0 * Atilde.nnz / float(A.nnz)

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)
        cost[0] += Atilde.nnz / float(A.nnz)
        if np.iscomplexobj(Atilde.data):
            cost[0] += Atilde.nnz / float(A.nnz)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data / data
        cost[0] += Atilde.nnz / float(A.nnz)

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)
        cost[0] += Atilde.nnz / float(A.nnz)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1
                cost[0] += B.shape[0] / float(A.nnz)

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(np.float64).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

        # Use constrained min problem to define strength.
        # This is the analogue of the NullDim=1 case, but for multiple
        # near-nullspace candidates.
        # Complexity accounts for computing the block inverse, and
        #   hat{z_i} = B_i*x, hat{z_i} .* hat{z_i},
        #   hat{z_i} = hat{z_i} / z_i, and abs(1.0 - hat{z_i}).
        cost[0] += (Atilde.nnz * (3 + NullDim) +
                    (NullDim**3) * dimen) / float(A.nnz)
        amg_core.evolution_strength_helper(
            Atilde.data, Atilde.indptr, Atilde.indices, Atilde.shape[0],
            np.ravel(np.asarray(B)),
            np.ravel(np.asarray((D_A * np.conjugate(B)).T)),
            np.ravel(np.asarray(BDB)), BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        cost[0] += Atilde.nnz / float(A.nnz)
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I
    cost[0] += Atilde.shape[0] / float(A.nnz)

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks, ))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data
    cost[0] += Atilde.nnz / float(A.nnz)

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)
    cost[0] += Atilde.nnz / float(A.nnz)

    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5 * (Atilde + Atilde.T)
        cost[0] += Atilde.nnz / float(A.nnz)

    return Atilde
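
A hedged usage sketch for this routine, assuming pyamg and its gallery are installed. Note that the cost keyword is specific to the complexity-tracking variant shown above, accumulating work estimates in units of A.nnz; k=4 is a power of two, so the efficient squaring path is taken:

# Hypothetical usage sketch; assumes the complexity-tracking variant above.
import numpy as np
from pyamg.gallery import poisson
from pyamg.strength import evolution_strength_of_connection

A = poisson((10, 10), format='csr')     # 2D Poisson test matrix
cost = [0.0]                            # work accumulator, in units of A.nnz
S = evolution_strength_of_connection(A, B=np.ones((A.shape[0], 1)),
                                     epsilon=4.0, k=4, proj_type='D_A',
                                     weighting='local', cost=cost)
print(S.shape, cost[0])                 # S is NxN; cost[0] holds the estimate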
Example #2
def gmres_prolongation_smoothing(A,
                                 T,
                                 B,
                                 BtBinv,
                                 Sparsity_Pattern,
                                 maxiter,
                                 tol,
                                 weighting='local',
                                 Cpt_params=None,
                                 cost=[0.0]):
    '''
    Helper function for energy_prolongation_smoother(...).

    Use GMRES to smooth T by solving A T = 0, subject to nullspace
    and sparsity constraints.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix.  May be nonsymmetric and/or indefinite, which
        is the intended use case for GMRES smoothing.
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is the initial guess for the equation A T = 0.
        It is assumed that T B_c = B_f.
    B : {array}
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : {array}
        3 dimensional array such that
        BtBinv[i] = pinv(B_i.H B_i), where B_i is B restricted
        to the neighborhood (in the matrix graph) of dof i.
    Sparsity_Pattern : {csr_matrix, bsr_matrix}
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        the standard SA prolongation smoothing is carried out.  If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and (3) I_C: the C-point analogue to I_F.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator using GMRES to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    '''

    # For non-SPD system, apply GMRES with Diagonal Preconditioning

    # Preallocate space for new search directions
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    AV = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))

    # Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
    xtype = sparse.sputils.upcast(A.dtype, T.dtype, B.dtype)
    Q = []  # Givens Rotations
    V = []  # Krylov Space
    # vs = []     # vs store the pointers to each column of V for speed

    # Upper Hessenberg matrix, converted to upper tri with Givens Rots
    H = np.zeros((maxiter + 1, maxiter + 1), dtype=xtype)

    # GMRES will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        cost[0] += 1.0
    else:
        raise ValueError('weighting value is invalid')

    # Calculate initial residual
    #   Equivalent to R = -A*T;    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever
    #   R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))
    pyamg.amg_core.incomplete_mat_mult_bsr(
        A.indptr, A.indices, np.ravel(A.data), T.indptr, T.indices,
        np.ravel(T.data), R.indptr, R.indices, np.ravel(R.data),
        int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
        A.blocksize[0], A.blocksize[1], T.blocksize[1])
    R.data *= -1.0
    # T is block diagonal, using sparsity pattern of R with
    # incomplete=True significantly overestimates complexity.
    # More accurate to use full mat-mat with block diagonal T.
    cost[0] += mat_mat_complexity(A, T, incomplete=False) / float(A.nnz)

    # Apply diagonal preconditioner
    if weighting == 'local' or weighting == 'diagonal':
        R = scale_rows(R, Dinv)
    else:
        R = Dinv * R

    cost[0] += R.nnz / float(A.nnz)

    # Enforce R*B = 0
    temp_cost = [0.0]
    Satisfy_Constraints(R, B, BtBinv, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    if R.nnz == 0:
        print("Error in sa_energy_min(..).  Initial R no nonzeros on a level. \
               Returning tentative prolongator\n")
        return T

    # This is the RHS vector for the problem in the Krylov Space
    normr = np.sqrt((R.data.conjugate() * R.data).sum())
    g = np.zeros((maxiter + 1, ), dtype=xtype)
    g[0] = normr

    # First Krylov vector
    # V[0] = r/normr
    if normr > 0.0:
        V.append((1.0 / normr) * R)

    i = -1
    while i < maxiter - 1 and normr > tol:
        i = i + 1

        # Calculate new search direction
        #   Equivalent to:  AV = A*V;    AV = AV.multiply(Sparsity_Pattern)
        #   with the added constraint that explicit zeros are in AP wherever
        #   AP = 0 and Sparsity_Pattern does not
        AV.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(
            A.indptr, A.indices, np.ravel(A.data), V[i].indptr, V[i].indices,
            np.ravel(V[i].data), AV.indptr, AV.indices, np.ravel(AV.data),
            int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
            A.blocksize[0], A.blocksize[1], T.blocksize[1])
        cost[0] += mat_mat_complexity(A, AV, incomplete=True) / float(A.nnz)

        if weighting == 'local' or weighting == 'diagonal':
            AV = scale_rows(AV, Dinv)
        else:
            AV = Dinv * AV

        cost[0] += AV.nnz / float(A.nnz)

        # Enforce AV*B = 0
        temp_cost = [0.0]
        Satisfy_Constraints(AV, B, BtBinv, cost=temp_cost)
        V.append(AV.copy())
        cost[0] += temp_cost[0] / float(A.nnz)

        # Modified Gram-Schmidt
        for j in range(i + 1):
            # Frobenius inner-product
            H[j, i] = (V[j].conjugate().multiply(V[i + 1])).sum()
            V[i + 1] = V[i + 1] - H[j, i] * V[j]
            cost[0] += 2.0 * max(V[i + 1].nnz, V[j].nnz) / float(A.nnz)

        # Frobenius Norm
        H[i + 1, i] = np.sqrt(
            (V[i + 1].data.conjugate() * V[i + 1].data).sum())
        cost[0] += V[i + 1].nnz / float(A.nnz)

        # Check for breakdown
        if H[i + 1, i] != 0.0:
            V[i + 1] = (1.0 / H[i + 1, i]) * V[i + 1]
            cost[0] += V[i + 1].nnz / float(A.nnz)

        # Apply previous Givens rotations to H
        if i > 0:
            apply_givens(Q, H[:, i], i)

        # Calculate and apply next complex-valued Givens Rotation
        if H[i + 1, i] != 0:
            h1 = H[i, i]
            h2 = H[i + 1, i]
            h1_mag = np.abs(h1)
            h2_mag = np.abs(h2)
            if h1_mag < h2_mag:
                mu = h1 / h2
                tau = np.conjugate(mu) / np.abs(mu)
            else:
                mu = h2 / h1
                tau = mu / np.abs(mu)

            denom = np.sqrt(h1_mag**2 + h2_mag**2)
            c = h1_mag / denom
            s = h2_mag * tau / denom
            Qblock = np.array([[c, np.conjugate(s)], [-s, c]], dtype=xtype)
            Q.append(Qblock)

            # Apply Givens Rotation to g,
            #   the RHS for the linear system in the Krylov Subspace.
            g[i:i + 2] = np.dot(Qblock, g[i:i + 2])

            # Apply effect of Givens Rotation to H
            H[i, i] = np.dot(Qblock[0, :], H[i:i + 2, i])
            H[i + 1, i] = 0.0

        normr = np.abs(g[i + 1])
    # End while loop

    # Find best update to x in Krylov Space, V.  Solve the (i+1) x (i+1) system.
    if i != -1:
        y = la.solve(H[0:i + 1, 0:i + 1], g[0:i + 1])
        for j in range(i + 1):
            T = T + y[j] * V[j]
            cost[0] += max(T.nnz, V[j].nnz) / float(A.nnz)

    # Ensure identity at C-pts
    if Cpt_params[0]:
        T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

    return T
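
The complex Givens construction in the loop above can be checked in isolation. A minimal sketch, assuming only NumPy, verifying that the rotation annihilates the subdiagonal entry H[i+1, i]:

# Minimal sketch of the complex Givens rotation used above; NumPy only.
import numpy as np

h1, h2 = 3.0 + 4.0j, 1.0 - 2.0j           # stand-ins for H[i, i], H[i + 1, i]
h1_mag, h2_mag = np.abs(h1), np.abs(h2)
if h1_mag < h2_mag:
    mu = h1 / h2
    tau = np.conjugate(mu) / np.abs(mu)
else:
    mu = h2 / h1
    tau = mu / np.abs(mu)
denom = np.sqrt(h1_mag**2 + h2_mag**2)
c = h1_mag / denom
s = h2_mag * tau / denom
Qblock = np.array([[c, np.conjugate(s)], [-s, c]])

rotated = np.dot(Qblock, np.array([h1, h2]))
print(np.abs(rotated[1]))                 # ~0: subdiagonal entry is zeroed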
Example #3
def energy_prolongation_smoother(A,
                                 T,
                                 Atilde,
                                 B,
                                 Bf,
                                 Cpt_params,
                                 krylov='cg',
                                 maxiter=4,
                                 tol=1e-8,
                                 degree=1,
                                 weighting='local',
                                 prefilter={},
                                 postfilter={},
                                 cost=[0.0]):
    """Minimize the energy of the coarse basis functions (columns of T).  Both
    root-node and non-root-node style prolongation smoothing is available, see
    Cpt_params description below.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N)
    Atilde : {csr_matrix}
        Strength of connection matrix
    B : {array}
        Near-nullspace modes for coarse grid.  Has shape (M,k) where
        k is the number of coarse candidate vectors.
    Bf : {array}
        Near-nullspace modes for fine grid.  Has shape (N,k) where
        k is the number of coarse candidate vectors.
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then the
        standard SA prolongation smoothing is carried out.  If True, then
        root-node style prolongation smoothing is carried out.  The dict must
        be a dictionary of parameters containing, (1) P_I: P_I.T is the
        injection matrix for the Cpts, (2) I_F: an identity matrix for only the
        F-points (i.e. I, but with zero rows and columns for C-points) and
        (3) I_C: the C-point analogue to I_F.  See Notes below for more
        information.
    krylov : {string}
        'cg' : for SPD systems.  Solve A T = 0 in a constraint space with CG
        'cgnr' : for nonsymmetric and/or indefinite systems.
                 Solve A T = 0 in a constraint space with CGNR
        'gmres' : for nonsymmetric and/or indefinite systems.
                 Solve A T = 0 in a constraint space with GMRES
    maxiter : integer
        Number of energy minimization steps to apply to the prolongator
    tol : {scalar}
        Minimization tolerance
    degree : {int}
        Generate sparsity pattern for P based on (Atilde^degree T)
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the
            diagonal preconditioning
        'local': Uses a local row-wise weight based on the Gershgorin estimate.
            Avoids any potential under-damping due to inaccurate spectral
            radius estimates.
        'block': If A is a BSR matrix, use a block diagonal inverse of A
        'diagonal': Use inverse of the diagonal of A
    prefilter : {dictionary} : Default {}
        Filters elements by row in the sparsity pattern for P to reduce
        operator and setup complexity.  If None or an empty dictionary, no
        dropping in P is done.  If prefilter has key 'k', then the largest
        'k' entries are kept in each row.  If prefilter has key 'theta', all
        entries such that
            P[i,j] < kwargs['theta']*max(abs(P[i,:]))
        are dropped.  If prefilter['k'] and prefilter['theta'] are both
        present, then they are used in conjunction, with the union of their
        patterns used.
    postfilter : {dictionary} : Default {}
        Filters elements by row in the smoothed P to reduce operator
        complexity.  Only supported if using the rootnode_solver.  If None
        or an empty dictionary, no dropping in P is done.  If postfilter has
        key 'k', then the largest 'k' entries are kept in each row.  If
        postfilter has key 'theta', all entries such that
            P[i,j] < kwargs['theta']*max(abs(P[i,:]))
        are dropped.  If postfilter['k'] and postfilter['theta'] are both
        present, then they are used in conjunction, with the union of their
        patterns used.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator

    Notes
    -----
    Only 'diagonal' weighting is supported for the CGNR method, because
    we are working with A^* A and not A.

    When Cpt_params[0] == True, root-node style prolongation smoothing
    is used to minimize the energy of columns of T.  Essentially, an
    identity block is maintained in T, corresponding to injection from
    the coarse-grid to the fine-grid root-nodes.  See [2] for more details,
    and see util.utils.get_Cpt_params for the helper function to generate
    Cpt_params.

    If Cpt_params[0] == False, the energy of columns of T are still
    minimized, but without maintaining the identity block.

    Examples
    --------
    >>> from pyamg.aggregation import energy_prolongation_smoother
    >>> from pyamg.gallery import poisson
    >>> from scipy.sparse import coo_matrix
    >>> import numpy as np
    >>> data = np.ones((6,))
    >>> row = np.arange(0,6)
    >>> col = np.kron([0,1],np.ones((3,)))
    >>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
    >>> print T.todense()
    [[ 1.  0.]
     [ 1.  0.]
     [ 1.  0.]
     [ 0.  1.]
     [ 0.  1.]
     [ 0.  1.]]
    >>> A = poisson((6,),format='csr')
    >>> B = np.ones((2,1),dtype=float)
    >>> P = energy_prolongation_smoother(A,T,A,B, None, (False,{}))
    >>> print P.todense()
    [[ 1.          0.        ]
     [ 1.          0.        ]
     [ 0.66666667  0.33333333]
     [ 0.33333333  0.66666667]
     [ 0.          1.        ]
     [ 0.          1.        ]]

    References
    ----------
    .. [1] Jan Mandel, Marian Brezina, and Petr Vanek
       "Energy Optimization of Algebraic Multigrid Bases"
       Computing 62, 205-228, 1999
       http://dx.doi.org/10.1007/s006070050022
    .. [2] Olson, L. and Schroder, J. and Tuminaro, R.,
       "A general interpolation strategy for algebraic
       multigrid using energy minimization", SIAM Journal
       on Scientific Computing (SISC), vol. 33, pp.
       966--991, 2011.
    """

    # Test Inputs
    if maxiter < 0:
        raise ValueError('maxiter must be > 0')
    if tol > 1:
        raise ValueError('tol must be <= 1')

    if sparse.isspmatrix_csr(A):
        A = A.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(A):
        pass
    else:
        raise TypeError("A must be csr_matrix or bsr_matrix")

    if sparse.isspmatrix_csr(T):
        T = T.tobsr(blocksize=(1, 1), copy=False)
    elif sparse.isspmatrix_bsr(T):
        pass
    else:
        raise TypeError("T must be csr_matrix or bsr_matrix")

    if T.blocksize[0] != A.blocksize[0]:
        raise ValueError("T row-blocksize should be the same as A blocksize")

    if B.shape[0] != T.shape[1]:
        raise ValueError("B holds the coarse-grid candidates; "
                         "num_rows(B) must equal num_cols(T)")

    if min(T.nnz, A.nnz) == 0:
        return T

    if (Atilde is not None) and (not sparse.isspmatrix_csr(Atilde)):
        raise TypeError("Atilde must be csr_matrix")

    if ('theta' in prefilter) and (prefilter['theta'] == 0):
        prefilter.pop('theta', None)

    if ('theta' in postfilter) and (postfilter['theta'] == 0):
        postfilter.pop('theta', None)

    # Preprocess Atilde, the strength matrix
    if Atilde is None:
        Atilde = sparse.csr_matrix(
            (np.ones(len(A.indices)), A.indices.copy(), A.indptr.copy()),
            shape=(int(A.shape[0] / A.blocksize[0]),
                   int(A.shape[1] / A.blocksize[1])))

    # If Atilde has no nonzeros, then return T
    if min(T.nnz, Atilde.nnz) == 0:
        return T

    # Expand allowed sparsity pattern for P through multiplication by Atilde
    if degree > 0:

        # Construct Sparsity_Pattern by multiplying with Atilde
        T.sort_indices()
        shape = (int(T.shape[0] / T.blocksize[0]),
                 int(T.shape[1] / T.blocksize[1]))
        Sparsity_Pattern = sparse.csr_matrix(
            (np.ones(T.indices.shape), T.indices, T.indptr), shape=shape)

        AtildeCopy = Atilde.copy()
        for i in range(degree):
            cost[0] += mat_mat_complexity(AtildeCopy,
                                          Sparsity_Pattern) / float(A.nnz)
            Sparsity_Pattern = AtildeCopy * Sparsity_Pattern

        # Optional filtering of sparsity pattern before smoothing
        #   - Complexity: two passes through T for theta-filter, a sort on
        #     each row for k-filter, adding matrices if both.
        if 'theta' in prefilter and 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
                                                prefilter['theta'],
                                                cost=temp_cost)
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
            # Union two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
            cost[0] += Sparsity_Pattern.nnz / float(A.nnz)
        elif 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
                                                  prefilter['theta'],
                                                  cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        # UnAmal returns a BSR matrix with 1's in the nonzero locations
        Sparsity_Pattern = UnAmal(Sparsity_Pattern, T.blocksize[0],
                                  T.blocksize[1])
        Sparsity_Pattern.sort_indices()

    else:
        # If degree is 0, just copy T for the sparsity pattern
        Sparsity_Pattern = T.copy()
        if 'theta' in prefilter and 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
                                                prefilter['theta'],
                                                cost=temp_cost)
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
            # Union two sparsity patterns
            Sparsity_Pattern += Sparsity_theta
            cost[0] += Sparsity_Pattern.nnz / float(A.nnz)
        elif 'k' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = truncate_rows(Sparsity_Pattern,
                                             prefilter['k'],
                                             cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in prefilter:
            temp_cost = [0.0]
            Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
                                                  prefilter['theta'],
                                                  cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif len(prefilter) > 0:
            raise ValueError("Unrecognized prefilter option")

        Sparsity_Pattern.data[:] = 1.0
        Sparsity_Pattern.sort_indices()

    # If using root nodes, enforce identity at C-points
    if Cpt_params[0]:
        Sparsity_Pattern = Cpt_params[1]['I_F'] * Sparsity_Pattern
        Sparsity_Pattern = Cpt_params[1]['P_I'] + Sparsity_Pattern
        cost[0] += Sparsity_Pattern.nnz / float(A.nnz)

    # Construct array of inv(Bi'Bi), where Bi is B restricted to row i's
    # sparsity pattern in Sparsity Pattern. This array is used multiple times
    # in Satisfy_Constraints(...).
    temp_cost = [0.0]
    BtBinv = compute_BtBinv(B, Sparsity_Pattern, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    # If using root nodes and B has more columns than A's blocksize, then
    # T must be updated so that T*B = Bfine.  Note, if this is a 'secondpass'
    # after dropping entries in P, then we must re-enforce the constraints
    if (Cpt_params[0] and
        (B.shape[1] > A.blocksize[0])) or ('secondpass' in postfilter):
        temp_cost = [0.0]
        T = filter_operator(T, Sparsity_Pattern, B, Bf, BtBinv, cost=temp_cost)
        cost[0] += temp_cost[0] / float(A.nnz)
        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

    # Iteratively minimize the energy of T subject to the constraints of
    # Sparsity_Pattern and maintaining T's effect on B, i.e. T*B =
    # (T+Update)*B, i.e. Update*B = 0
    if krylov == 'cg':
        T = cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                      maxiter, tol, weighting, Cpt_params,
                                      cost)
    elif krylov == 'cgnr':
        T = cgnr_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                        maxiter, tol, weighting, Cpt_params,
                                        cost)
    elif krylov == 'gmres':
        T = gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
                                         maxiter, tol, weighting, Cpt_params,
                                         cost)

    T.eliminate_zeros()

    # Filter entries in P, only in the rootnode case, i.e., Cpt_params[0] == True
    if (len(postfilter) == 0) or ('secondpass'
                                  in postfilter) or (Cpt_params[0] is False):
        return T
    else:
        if 'theta' in postfilter and 'k' in postfilter:
            temp_cost = [0.0]
            T_theta = filter_matrix_rows(T,
                                         postfilter['theta'],
                                         cost=temp_cost)
            T_k = truncate_rows(T, postfilter['k'], cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)

            # Union two sparsity patterns
            T_theta.data[:] = 1.0
            T_k.data[:] = 1.0
            T_filter = T_theta + T_k
            T_filter.data[:] = 1.0
            T_filter = T.multiply(T_filter)

        elif 'k' in postfilter:
            temp_cost = [0.0]
            T_filter = truncate_rows(T, postfilter['k'], cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        elif 'theta' in postfilter:
            temp_cost = [0.0]
            T_filter = filter_matrix_rows(T,
                                          postfilter['theta'],
                                          cost=temp_cost)
            cost[0] += temp_cost[0] / float(A.nnz)
        else:
            raise ValueError("Unrecognized postfilter option")

        # Re-smooth T_filter and re-fit the modes B into the span.
        # Note, we set 'secondpass', because this is the second
        # filtering pass
        T = energy_prolongation_smoother(A,
                                         T_filter,
                                         Atilde,
                                         B,
                                         Bf,
                                         Cpt_params,
                                         krylov=krylov,
                                         maxiter=1,
                                         tol=1e-8,
                                         degree=0,
                                         weighting=weighting,
                                         prefilter={},
                                         postfilter={'secondpass': True},
                                         cost=cost)

    return T
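
A variant of the docstring example above, shown as a hedged sketch: the same pyamg gallery problem, but exercising this routine's prefilter option, which drops small pattern entries before smoothing (the theta value 0.1 is illustrative, not canonical):

# Sketch based on the docstring example above; prefilter={'theta': ...}
# drops pattern entries below theta times each row's largest magnitude.
import numpy as np
from scipy.sparse import coo_matrix
from pyamg.gallery import poisson
from pyamg.aggregation import energy_prolongation_smoother

data = np.ones((6,))
row = np.arange(0, 6)
col = np.kron([0, 1], np.ones((3,)))
T = coo_matrix((data, (row, col)), shape=(6, 2)).tocsr()
A = poisson((6,), format='csr')
B = np.ones((2, 1), dtype=float)
P = energy_prolongation_smoother(A, T, A, B, None, (False, {}),
                                 prefilter={'theta': 0.1})
print(P.todense())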
Example #4
def cg_prolongation_smoothing(A,
                              T,
                              B,
                              BtBinv,
                              Sparsity_Pattern,
                              maxiter,
                              tol,
                              weighting='local',
                              Cpt_params=None,
                              cost=[0.0]):
    '''
    Helper function for energy_prolongation_smoother(...)

    Use CG to smooth T by solving A T = 0, subject to nullspace
    and sparsity constraints.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        SPD sparse NxN matrix
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is the initial guess for the equation A T = 0.
        It is assumed that T B_c = B_f.
    B : {array}
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : {array}
        3 dimensional array such that
        BtBinv[i] = pinv(B_i.H B_i), where B_i is B restricted
        to the neighborhood (in the matrix graph) of dof i.
    Sparsity_Pattern : {csr_matrix, bsr_matrix}
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        the standard SA prolongation smoothing is carried out.  If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and (3) I_C: the C-point analogue to I_F.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator using conjugate gradients to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    '''

    # Preallocate
    AP = sparse.bsr_matrix(
        (np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype),
         Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))

    # CG will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        cost[0] += 1
    else:
        raise ValueError('weighting value is invalid')

    # Calculate initial residual
    #   Equivalent to R = -A*T;    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever
    #   R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))
    pyamg.amg_core.incomplete_mat_mult_bsr(
        A.indptr, A.indices, np.ravel(A.data), T.indptr, T.indices,
        np.ravel(T.data), R.indptr, R.indices, np.ravel(R.data),
        int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
        A.blocksize[0], A.blocksize[1], T.blocksize[1])
    R.data *= -1.0
    # T is block diagonal, using sparsity pattern of R with
    # incomplete=True significantly overestimates complexity.
    # More accurate to use full mat-mat with block diagonal T.
    cost[0] += mat_mat_complexity(A, T, incomplete=False) / float(A.nnz)

    # Enforce R*B = 0
    temp_cost = [0.0]
    Satisfy_Constraints(R, B, BtBinv, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    if R.nnz == 0:
        print("Error in sa_energy_min(..).  Initial R no nonzeros on a level. \
               Returning tentative prolongator\n")
        return T

    # Calculate Frobenius norm of the residual
    resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())

    i = 0
    while i < maxiter and resid > tol:
        # Apply diagonal preconditioner
        if weighting == 'local' or weighting == 'diagonal':
            Z = scale_rows(R, Dinv)
        else:
            Z = Dinv * R

        cost[0] += R.nnz / float(A.nnz)

        # Frobenius inner-product of (R,Z) = sum( np.conjugate(rk).*zk)
        newsum = (R.conjugate().multiply(Z)).sum()
        cost[0] += Z.nnz / float(A.nnz)
        if newsum < tol:
            # met tolerance, so halt
            break

        # P is the search direction, not the prolongator, which is T.
        if (i == 0):
            P = Z
            oldsum = newsum
        else:
            beta = newsum / oldsum
            P = Z + beta * P
            cost[0] += max(Z.nnz, P.nnz) / float(A.nnz)
        oldsum = newsum

        # Calculate new direction and enforce constraints
        #   Equivalent to:  AP = A*P;    AP = AP.multiply(Sparsity_Pattern)
        #   with the added constraint that explicit zeros are in AP wherever
        #   AP = 0 and Sparsity_Pattern does not  !!!!
        AP.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(
            A.indptr, A.indices, np.ravel(A.data), P.indptr, P.indices,
            np.ravel(P.data), AP.indptr, AP.indices, np.ravel(AP.data),
            int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]),
            A.blocksize[0], A.blocksize[1], P.blocksize[1])
        cost[0] += mat_mat_complexity(A, AP, incomplete=True) / float(A.nnz)

        # Enforce AP*B = 0
        temp_cost = [0.0]
        Satisfy_Constraints(AP, B, BtBinv, cost=temp_cost)
        cost[0] += temp_cost[0] / float(A.nnz)

        # Frobenius inner-product of (P, AP)
        alpha = newsum / (P.conjugate().multiply(AP)).sum()
        cost[0] += max(P.nnz, AP.nnz) / float(A.nnz)

        # Update the prolongator, T
        T = T + alpha * P
        cost[0] += max(P.nnz, T.nnz) / float(A.nnz)

        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

        # Update residual
        R = R - alpha * AP
        cost[0] += max(R.nnz, AP.nnz) / float(A.nnz)

        # Calculate Frobenius norm of the residual
        resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
        i += 1

    return T
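
The Frobenius inner products driving newsum and alpha above can be sanity-checked on small random matrices. A minimal sketch, assuming only NumPy and SciPy, confirming that (R.conjugate().multiply(Z)).sum() equals trace(R^H Z):

# Minimal check of the sparse Frobenius inner product used above.
import numpy as np
from scipy import sparse

R = sparse.random(8, 4, density=0.5, random_state=0, format='csr')
Z = sparse.random(8, 4, density=0.5, random_state=1, format='csr')

frob = (R.conjugate().multiply(Z)).sum()          # entrywise form
trace = np.trace(R.toarray().conj().T.dot(Z.toarray()))
print(np.allclose(frob, trace))                   # True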
Example #5
def cgnr_prolongation_smoothing(A,
                                T,
                                B,
                                BtBinv,
                                Sparsity_Pattern,
                                maxiter,
                                tol,
                                weighting='local',
                                Cpt_params=None,
                                cost=[0.0]):
    '''
    Helper function for energy_prolongation_smoother(...)

    Use CGNR to smooth T by solving A T = 0, subject to nullspace
    and sparsity constraints.

    Parameters
    ----------

    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix.  May be nonsymmetric and/or indefinite, which
        is the intended use case for CGNR smoothing.
    T : {bsr_matrix}
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is the initial guess for the equation A T = 0.
        It is assumed that T B_c = B_f.
    B : {array}
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : {array}
        3 dimensional array such that
        BtBinv[i] = pinv(B_i.H B_i), where B_i is B restricted
        to the neighborhood (in the matrix graph) of dof i.
    Sparsity_Pattern : {csr_matrix, bsr_matrix}
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
        IGNORED here, only 'diagonal' preconditioning is used.
    Cpt_params : {tuple}
        Tuple of the form (bool, dict).  If Cpt_params[0] is False, then
        the standard SA prolongation smoothing is carried out.  If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and (3) I_C: the C-point analogue to I_F.

    Returns
    -------
    T : {bsr_matrix}
        Smoothed prolongator using CGNR to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    '''

    # For non-SPD system, apply CG on Normal Equations with Diagonal
    # Preconditioning (requires transpose)
    Ah = A.H
    Ah.sort_indices()

    # Preallocate
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    AP = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))

    # D for A.H*A
    Dinv = get_diagonal(A, norm_eq=1, inv=True)

    # Calculate initial residual
    #   Equivalent to R = -Ah*(A*T);    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever
    #   R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix(
        (uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
        shape=(Sparsity_Pattern.shape))
    AT = -1.0 * A * T
    cost[0] += T.nnz / float(T.shape[0])
    R.data[:] = 0.0
    pyamg.amg_core.incomplete_mat_mult_bsr(Ah.indptr, Ah.indices,
                                           np.ravel(Ah.data),
                                           AT.indptr, AT.indices,
                                           np.ravel(AT.data), R.indptr,
                                           R.indices, np.ravel(R.data),
                                           int(T.shape[0] / T.blocksize[0]),
                                           int(T.shape[1] / T.blocksize[1]),
                                           Ah.blocksize[0], Ah.blocksize[1],
                                           T.blocksize[1])
    # T is block diagonal, sparsity of AT should be well contained
    # in R. incomplete=True significantly overestimates complexity
    # with R. More accurate to use full mat-mat with block diagonal T.
    cost[0] += mat_mat_complexity(Ah, AT, incomplete=False) / float(A.nnz)

    # Enforce R*B = 0
    temp_cost = [0.0]
    Satisfy_Constraints(R, B, BtBinv, cost=temp_cost)
    cost[0] += temp_cost[0] / float(A.nnz)

    if R.nnz == 0:
        print("Error in sa_energy_min(..).  Initial R no nonzeros on a level. \
               Returning tentative prolongator\n")
        return T

    # Calculate Frobenius norm of the residual
    resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
    i = 0
    while i < maxiter and resid > tol:

        # Apply diagonal preconditioner
        Z = scale_rows(R, Dinv)
        cost[0] += R.nnz / float(A.nnz)

        # Frobenius inner-product of (R,Z) = sum(conjugate(rk).*zk)
        newsum = (R.conjugate().multiply(Z)).sum()
        cost[0] += R.nnz / float(A.nnz)
        if newsum < tol:
            # met tolerance, so halt
            break

        # P is the search direction, not the prolongator, which is T.
        if (i == 0):
            P = Z
            oldsum = newsum
        else:
            beta = newsum / oldsum
            P = Z + beta * P
            cost[0] += max(Z.nnz, P.nnz) / float(A.nnz)

        oldsum = newsum

        # Calculate new direction
        #  Equivalent to:  AP = Ah*(A*P);    AP = AP.multiply(Sparsity_Pattern)
        #  with the added constraint that explicit zeros are in AP wherever
        #  AP = 0 and Sparsity_Pattern does not
        AP_temp = A * P
        cost[0] += P.nnz / float(P.shape[0])
        AP.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(
            Ah.indptr, Ah.indices, np.ravel(Ah.data), AP_temp.indptr,
            AP_temp.indices, np.ravel(AP_temp.data), AP.indptr, AP.indices,
            np.ravel(AP.data), int(T.shape[0] / T.blocksize[0]),
            int(T.shape[1] / T.blocksize[1]), Ah.blocksize[0], Ah.blocksize[1],
            T.blocksize[1])
        cost[0] += mat_mat_complexity(A, AP, incomplete=True) / float(A.nnz)
        del AP_temp

        # Enforce AP*B = 0
        temp_cost = [0.0]
        Satisfy_Constraints(AP, B, BtBinv, cost=temp_cost)
        cost[0] += temp_cost[0] / float(A.nnz)

        # Frobenius inner-product of (P, AP)
        alpha = newsum / (P.conjugate().multiply(AP)).sum()
        cost[0] += max(P.nnz, AP.nnz) / float(A.nnz)

        # Update the prolongator, T
        T = T + alpha * P
        cost[0] += max(T.nnz, P.nnz) / float(A.nnz)

        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']

        # Update residual
        R = R - alpha * AP
        cost[0] += max(R.nnz, AP.nnz) / float(A.nnz)

        # Calculate Frobenius norm of the residual
        resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
        i += 1

    return T
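
The diagonal used above comes from the normal equations. A hedged sketch, assuming get_diagonal's norm_eq=1 convention is the diagonal of A.H * A (as its use here suggests), comparing against an explicit computation:

# Hedged sketch: compare the normal-equations diagonal used above
# against an explicit diag(A.H * A) computation.
import numpy as np
from pyamg.gallery import poisson
from pyamg.util.utils import get_diagonal

A = poisson((10,), format='csr')
Dinv = get_diagonal(A, norm_eq=1, inv=True)
explicit = 1.0 / (A.H * A).diagonal()
print(np.allclose(Dinv, explicit))   # expected True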
Example #6
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
                     diagonal_dominance=False, keep=True):
    """Service routine to implement the strength of connection, aggregation,
    tentative prolongation construction, and prolongation smoothing.  Called by
    smoothed_aggregation_solver.
    """

    A = levels[-1].A
    B = levels[-1].B
    if A.symmetry == "nonsymmetric":
        AH = A.H.asformat(A.format)
        BH = levels[-1].BH

    # Compute the strength-of-connection matrix C, where larger
    # C[i, j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength[len(levels)-1])
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        if 'B' in kwargs:
            C = evolution_strength_of_connection(A, **kwargs)
        else:
            C = evolution_strength_of_connection(A, B, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'predefined':
        C = kwargs['C'].tocsr()
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A.tocsr()
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))
    
    levels[-1].complexity['strength'] = kwargs['cost'][0]

    # Avoid coarsening diagonally dominant rows
    flag, kwargs = unpack_arg(diagonal_dominance)
    if flag:
        C = eliminate_diag_dom_nodes(A, C, **kwargs)
        levels[-1].complexity['diag_dom'] = kwargs['cost'][0]

    # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
    # AggOp is a boolean matrix, where the sparsity pattern of the k-th column
    # denotes the fine-grid nodes agglomerated into the k-th coarse-grid node.
    fn, kwargs = unpack_arg(aggregate[len(levels)-1])
    if fn == 'standard':
        AggOp, Cnodes = standard_aggregation(C, **kwargs)
    elif fn == 'naive':
        AggOp, Cnodes = naive_aggregation(C, **kwargs)
    elif fn == 'lloyd':
        AggOp, Cnodes = lloyd_aggregation(C, **kwargs)
    elif fn == 'predefined':
        AggOp = kwargs['AggOp'].tocsr()
        Cnodes = kwargs['Cnodes']
    else:
        raise ValueError('unrecognized aggregation method %s' % str(fn))
    
    levels[-1].complexity['aggregation'] = kwargs['cost'][0] * (float(C.nnz)/A.nnz)

    # Improve near nullspace candidates by relaxing on A B = 0
    temp_cost = [0.0]
    fn, kwargs = unpack_arg(improve_candidates[len(levels)-1], cost=False)
    if fn is not None:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        B = relaxation_as_linear_operator((fn, kwargs), A, b, temp_cost) * B
        levels[-1].B = B
        if A.symmetry == "nonsymmetric":
            BH = relaxation_as_linear_operator((fn, kwargs), AH, b, temp_cost) * BH
            levels[-1].BH = BH

    levels[-1].complexity['candidates'] = temp_cost[0] * B.shape[1]

    # Compute the tentative prolongator, T, which is a tentative interpolation
    # matrix from the coarse-grid to the fine-grid.  T exactly interpolates
    # B_fine[:, 0:blocksize(A)] = T B_coarse[:, 0:blocksize(A)].
    # Orthogonalization complexity ~ 2nk^2, k = blocksize(A).
    temp_cost = [0.0]
    T, dummy = fit_candidates(AggOp, B[:, 0:blocksize(A)], cost=temp_cost)
    del dummy
    if A.symmetry == "nonsymmetric":
        TH, dummyH = fit_candidates(AggOp, BH[:, 0:blocksize(A)], cost=temp_cost)
        del dummyH

    levels[-1].complexity['tentative'] = temp_cost[0]/A.nnz
    
    # Create necessary root node matrices
    Cpt_params = (True, get_Cpt_params(A, Cnodes, AggOp, T))
    T = scale_T(T, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
    levels[-1].complexity['tentative'] += T.nnz / float(A.nnz)
    if A.symmetry == "nonsymmetric":
        TH = scale_T(TH, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
        levels[-1].complexity['tentative'] += TH.nnz / float(A.nnz)

    # Set coarse-grid near-nullspace modes as injected fine-grid
    # near-nullspace modes
    B = Cpt_params[1]['P_I'].T*levels[-1].B
    if A.symmetry == "nonsymmetric":
        BH = Cpt_params[1]['P_I'].T*levels[-1].BH

    # Smooth the tentative prolongator, so that its accuracy is greatly
    # improved for algebraically smooth error.
    fn, kwargs = unpack_arg(smooth[len(levels)-1])
    if fn == 'energy':
        P = energy_prolongation_smoother(A, T, C, B, levels[-1].B,
                                         Cpt_params=Cpt_params, **kwargs)
    elif fn is None:
        P = T
    else:
        raise ValueError('unrecognized prolongation smoother method %s' %
                         str(fn))

    levels[-1].complexity['smooth_P'] = kwargs['cost'][0]

    # Compute the restriction matrix R, which interpolates from the fine-grid
    # to the coarse-grid.  If A is nonsymmetric, then R must be constructed
    # based on A.H.  Otherwise R = P.H or P.T.
    symmetry = A.symmetry
    if symmetry == 'hermitian':
        R = P.H
    elif symmetry == 'symmetric':
        R = P.T
    elif symmetry == 'nonsymmetric':
        fn, kwargs = unpack_arg(smooth[len(levels)-1])
        if fn == 'energy':
            R = energy_prolongation_smoother(AH, TH, C, BH, levels[-1].BH,
                                             Cpt_params=Cpt_params, **kwargs)
            R = R.H
            levels[-1].complexity['smooth_R'] = kwargs['cost'][0]
        elif fn is None:
            R = T.H
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

    if keep:
        levels[-1].C = C                        # strength of connection matrix
        levels[-1].AggOp = AggOp                # aggregation operator
        levels[-1].T = T                        # tentative prolongator
        levels[-1].Fpts = Cpt_params[1]['Fpts'] # Fpts
        levels[-1].P_I = Cpt_params[1]['P_I']   # Injection operator
        levels[-1].I_F = Cpt_params[1]['I_F']   # Identity on F-pts
        levels[-1].I_C = Cpt_params[1]['I_C']   # Identity on C-pts

    levels[-1].P = P                            # smoothed prolongator
    levels[-1].R = R                            # restriction operator
    levels[-1].Cpts = Cpt_params[1]['Cpts']     # Cpts (i.e., rootnodes)

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R,A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA,P) / float(A.nnz)
    A = RA * P      # Galerkin operator, Ac = RAP
    A.symmetry = symmetry

    levels.append(multilevel_solver.level())
    levels[-1].A = A
    levels[-1].B = B                          # right near nullspace candidates

    if A.symmetry == "nonsymmetric":
        levels[-1].BH = BH                   # left near nullspace candidates
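
extend_hierarchy appends one coarser level per call; smoothed_aggregation_solver
drives it in a loop until the coarse grid is small enough.  A hedged sketch of
such a driver, with the real aggregation and smoothing replaced by a toy
pairwise-injection prolongator (Level and build_hierarchy are illustrative
stand-ins, not pyamg's API):

import numpy as np
import scipy.sparse as sp

class Level:
    pass

def build_hierarchy(A, max_levels=10, max_coarse=10):
    levels = [Level()]
    levels[0].A = A
    levels[0].B = np.ones((A.shape[0], 1))
    while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
        Af = levels[-1].A
        n = Af.shape[0]
        # Toy prolongator: inject pairs of fine points onto one coarse point.
        # extend_hierarchy instead builds P via strength-of-connection,
        # aggregation, fit_candidates and prolongation smoothing.
        P = sp.csr_matrix((np.ones(n), (np.arange(n), np.arange(n) // 2)),
                          shape=(n, (n + 1) // 2))
        coarse = Level()
        coarse.A = (P.T @ Af @ P).tocsr()   # Galerkin product
        coarse.B = P.T @ levels[-1].B       # restricted candidates
        levels.append(coarse)
    return levels

A = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(64, 64), format='csr')
print([lvl.A.shape[0] for lvl in build_hierarchy(A)])  # [64, 32, 16, 8]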
Example #7
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
                     diagonal_dominance=False, keep=True):
    """Service routine to implement the strength of connection, aggregation,
    tentative prolongation construction, and prolongation smoothing.  Called by
    smoothed_aggregation_solver.
    """

    A = levels[-1].A
    B = levels[-1].B
    if A.symmetry == "nonsymmetric":
        AH = A.H.asformat(A.format)
        BH = levels[-1].BH

    # Compute the strength-of-connection matrix C, where a larger
    # C[i,j] denotes a stronger coupling between i and j.
    fn, kwargs = unpack_arg(strength[len(levels)-1])
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        if 'B' in kwargs:
            C = evolution_strength_of_connection(A, **kwargs)
        else:
            C = evolution_strength_of_connection(A, B, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'predefined':
        C = kwargs['C'].tocsr()
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A.tocsr()
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    levels[-1].complexity['strength'] = kwargs['cost'][0]
 
    # Avoid coarsening diagonally dominant rows
    flag, kwargs = unpack_arg(diagonal_dominance)
    if flag:
        C = eliminate_diag_dom_nodes(A, C, **kwargs)
        levels[-1].complexity['diag_dom'] = kwargs['cost'][0]

    # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
    # AggOp is a boolean matrix, where the sparsity pattern of the k-th column
    # denotes the fine-grid nodes agglomerated into the k-th coarse-grid node.
    fn, kwargs = unpack_arg(aggregate[len(levels)-1])
    if fn == 'standard':
        AggOp = standard_aggregation(C, **kwargs)[0]
    elif fn == 'naive':
        AggOp = naive_aggregation(C, **kwargs)[0]
    elif fn == 'lloyd':
        AggOp = lloyd_aggregation(C, **kwargs)[0]
    elif fn == 'predefined':
        AggOp = kwargs['AggOp'].tocsr()
    else:
        raise ValueError('unrecognized aggregation method %s' % str(fn))

    levels[-1].complexity['aggregation'] = kwargs['cost'][0] * (float(C.nnz)/A.nnz)

    # Improve near nullspace candidates by relaxing on A B = 0
    temp_cost = [0.0]
    fn, kwargs = unpack_arg(improve_candidates[len(levels)-1], cost=False)
    if fn is not None:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        B = relaxation_as_linear_operator((fn, kwargs), A, b, temp_cost) * B
        levels[-1].B = B
        if A.symmetry == "nonsymmetric":
            BH = relaxation_as_linear_operator((fn, kwargs), AH, b, temp_cost) * BH
            levels[-1].BH = BH

    levels[-1].complexity['candidates'] = temp_cost[0] * B.shape[1]

    # Compute the tentative prolongator, T, which is a tentative interpolation
    # matrix from the coarse-grid to the fine-grid.  T exactly interpolates
    # B_fine = T B_coarse. Orthogonalization complexity ~ 2nk^2, k=B.shape[1].
    temp_cost = [0.0]
    T, B = fit_candidates(AggOp, B, cost=temp_cost)
    if A.symmetry == "nonsymmetric":
        TH, BH = fit_candidates(AggOp, BH, cost=temp_cost)

    levels[-1].complexity['tentative'] = temp_cost[0]/A.nnz

    # Smooth the tentative prolongator, so that its accuracy is greatly
    # improved for algebraically smooth error.
    fn, kwargs = unpack_arg(smooth[len(levels)-1])
    if fn == 'jacobi':
        P = jacobi_prolongation_smoother(A, T, C, B, **kwargs)
    elif fn == 'richardson':
        P = richardson_prolongation_smoother(A, T, **kwargs)
    elif fn == 'energy':
        P = energy_prolongation_smoother(A, T, C, B, None, (False, {}),
                                         **kwargs)
    elif fn is None:
        P = T
    else:
        raise ValueError('unrecognized prolongation smoother method %s' %
                         str(fn))

    levels[-1].complexity['smooth_P'] = kwargs['cost'][0]

    # Compute the restriction matrix, R, which interpolates from the fine-grid
    # to the coarse-grid.  If A is nonsymmetric, then R must be constructed
    # based on A.H.  Otherwise R = P.H or P.T.
    symmetry = A.symmetry
    if symmetry == 'hermitian':
        R = P.H
    elif symmetry == 'symmetric':
        R = P.T
    elif symmetry == 'nonsymmetric':
        fn, kwargs = unpack_arg(smooth[len(levels)-1])
        if fn == 'jacobi':
            R = jacobi_prolongation_smoother(AH, TH, C, BH, **kwargs).H
        elif fn == 'richardson':
            R = richardson_prolongation_smoother(AH, TH, **kwargs).H
        elif fn == 'energy':
            R = energy_prolongation_smoother(AH, TH, C, BH, None, (False, {}),
                                             **kwargs)
            R = R.H
        elif fn is None:
            R = T.H
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))
        levels[-1].complexity['smooth_R'] = kwargs['cost'][0]

    if keep:
        levels[-1].C = C            # strength of connection matrix
        levels[-1].AggOp = AggOp    # aggregation operator
        levels[-1].T = T            # tentative prolongator

    levels[-1].P = P  # smoothed prolongator
    levels[-1].R = R  # restriction operator

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R,A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA,P) / float(A.nnz)
    A = RA * P      # Galerkin operator, Ac = RAP
    A.symmetry = symmetry

    levels.append(multilevel_solver.level())
    levels[-1].A = A
    levels[-1].B = B           # right near nullspace candidates

    if A.symmetry == "nonsymmetric":
        levels[-1].BH = BH     # left near nullspace candidates
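
Every method argument above (strength, aggregate, smooth, improve_candidates)
follows the (fn, kwargs) convention handled by unpack_arg: a bare string, a
('name', {...}) tuple, or None, with a cost-accumulator list injected so that
kwargs['cost'][0] can be read back afterwards.  A sketch of that convention
(unpack_arg_sketch is illustrative, not pyamg's exact helper):

def unpack_arg_sketch(v, cost=True):
    # Normalize 'name', ('name', {...}) or None into (fn, kwargs).
    if isinstance(v, tuple):
        fn, kwargs = v[0], dict(v[1])
    else:
        fn, kwargs = v, {}
    if cost:
        kwargs.setdefault('cost', [0.0])  # work-unit accumulator
    return fn, kwargs

fn, kwargs = unpack_arg_sketch(('symmetric', {'theta': 0.25}))
assert fn == 'symmetric' and kwargs['theta'] == 0.25
assert kwargs['cost'] == [0.0]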
Example #8
def extend_hierarchy(levels, strength, CF, interp, restrict, filter_operator,
                     coarse_grid_P, coarse_grid_R, keep):
    """ helper function for local methods """

    # Filter operator.  Need to keep the original matrix on the finest
    # level for computing residuals.
    if (filter_operator is not None) and (filter_operator[1] != 0): 
        if len(levels) == 1:
            A = deepcopy(levels[-1].A)
        else:
            A = levels[-1].A
        filter_matrix_rows(A, filter_operator[1], diagonal=True, lump=filter_operator[0])
    else:
        A = levels[-1].A

    # Check if matrix was filtered to be diagonal --> coarsest grid
    if A.nnz == A.shape[0]:
        return 1

    # Zero initial complexities for strength, splitting and interpolation
    levels[-1].complexity['CF'] = 0.0
    levels[-1].complexity['strength'] = 0.0
    levels[-1].complexity['interpolate'] = 0.0

    # Compute the strength-of-connection matrix C, where a larger
    # C[i,j] denotes a stronger coupling between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))
    levels[-1].complexity['strength'] += kwargs['cost'][0] * A.nnz / float(A.nnz)

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    elif fn == 'weighted_matching':
        splitting, soc = weighted_matching(C, **kwargs)
        if soc is not None:
            C = soc
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)
    levels[-1].complexity['CF'] += kwargs['cost'][0] * C.nnz / float(A.nnz)
    temp = np.sum(splitting)
    if (temp == len(splitting)) or (temp == 0):
        return 1

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    r_flag = False
    fn, kwargs = unpack_arg(interp)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'scaledAfc':
        P = scaled_Afc_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A):
            temp_A = bsr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = bsr_matrix(P.T)
        else:
            temp_A = csr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = csr_matrix(P.T)
    elif fn == 'restrict':
        r_flag = True
    else:
        raise ValueError('unknown interpolation method (%s)' % interp)
    levels[-1].complexity['interpolate'] += kwargs['cost'][0] * A.nnz / float(A.nnz)

    # Build restriction operator
    fn, kwargs = unpack_arg(restrict)
    if fn is None:
        R = P.T
    elif fn == 'air':
        R = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':         # Don't need A^T here
        temp_C = C.T.tocsr()
        R = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'inject':            # Don't need A^T or C^T here
        R = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()        
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    else:
        raise ValueError('unknown restriction method (%s)' % restrict)

    # If interp == 'restrict', set P = R^T
    if r_flag:
        P = R.T

    # Optional different interpolation for RAP
    fn, kwargs = unpack_arg(coarse_grid_P)
    if fn == 'standard':
        P_temp = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P_temp = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P_temp = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P_temp = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P_temp = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P_temp = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A): 
            temp_A = bsr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = bsr_matrix(P_temp.T)
        else:
            temp_A = csr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = csr_matrix(P_temp.T)
    else:
        P_temp = P

    # Optional different restriction for RAP
    fn, kwargs = unpack_arg(coarse_grid_R)
    if fn == 'air':
        R_temp = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R_temp = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':         # Don't need A^T here
        temp_C = C.T.tocsr()
        R_temp = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'inject':            # Don't need A^T or C^T here
        R_temp = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()        
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    else:
        R_temp = R

    # Store relevant information for this level
    if keep:
        levels[-1].C = C              # strength of connection matrix

    levels[-1].P = P                  # prolongation operator
    levels[-1].R = R                  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    #levels[-1].complexity['RAP'] = mat_mat_complexity(R_temp,A) / float(A.nnz)
    #RA = R_temp * A
    #levels[-1].complexity['RAP'] += mat_mat_complexity(RA,P_temp) / float(A.nnz)
    #A = RA * P_temp
    
    # RL: RAP = R*(A*P)
    levels[-1].complexity['RAP'] = mat_mat_complexity(A, P_temp) / float(A.nnz)
    AP = A * P_temp
    levels[-1].complexity['RAP'] += mat_mat_complexity(R_temp, AP) / float(A.nnz)
    A = R_temp * AP
    

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    A.eliminate_zeros()
    levels.append(multilevel_solver.level())
    levels[-1].A = A
    return 0
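
The "RAP = R*(A*P)" block above deliberately associates the triple product as
R*(A*P) rather than (R*A)*P: when the restriction is much denser than the
prolongator (as it can be with AIR), the intermediate A*P is typically cheaper
than R*A.  A small standalone comparison of the two intermediates (random
matrices with illustrative densities only):

import scipy.sparse as sp

A = sp.random(200, 200, density=0.05, format='csr', random_state=0)
P = sp.random(200, 40, density=0.02, format='csr', random_state=1)
R = sp.random(40, 200, density=0.10, format='csr', random_state=2)

nnz_AP = (A @ P).nnz  # intermediate for R*(A*P)
nnz_RA = (R @ A).nnz  # intermediate for (R*A)*P
print(nnz_AP, nnz_RA)  # same final product either way, different build cost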
Example #9
def extend_hierarchy(levels, strength, CF, interp, restrict, filter_operator,
                     coarse_grid_P, coarse_grid_R, keep):
    """ helper function for local methods """

    # Filter operator.  Need to keep the original matrix on the finest
    # level for computing residuals.
    if (filter_operator is not None) and (filter_operator[1] != 0):
        if len(levels) == 1:
            A = deepcopy(levels[-1].A)
        else:
            A = levels[-1].A
        filter_matrix_rows(A,
                           filter_operator[1],
                           diagonal=True,
                           lump=filter_operator[0])
    else:
        A = levels[-1].A

    # Check if matrix was filtered to be diagonal --> coarsest grid
    if A.nnz == A.shape[0]:
        return 1

    # Zero initial complexities for strength, splitting and interpolation
    levels[-1].complexity['CF'] = 0.0
    levels[-1].complexity['strength'] = 0.0
    levels[-1].complexity['interpolate'] = 0.0

    # Compute the strength-of-connection matrix C, where a larger
    # C[i,j] denotes a stronger coupling between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))
    levels[-1].complexity['strength'] += kwargs['cost'][0] * A.nnz / float(
        A.nnz)

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    elif fn == 'weighted_matching':
        splitting, soc = weighted_matching(C, **kwargs)
        if soc is not None:
            C = soc
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)
    levels[-1].complexity['CF'] += kwargs['cost'][0] * C.nnz / float(A.nnz)
    temp = np.sum(splitting)
    if (temp == len(splitting)) or (temp == 0):
        return 1

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    r_flag = False
    fn, kwargs = unpack_arg(interp)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'scaledAfc':
        P = scaled_Afc_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A):
            temp_A = bsr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = bsr_matrix(P.T)
        else:
            temp_A = csr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = csr_matrix(P.T)
    elif fn == 'restrict':
        r_flag = True
    else:
        raise ValueError('unknown interpolation method (%s)' % interp)
    levels[-1].complexity['interpolate'] += kwargs['cost'][0] * A.nnz / float(
        A.nnz)

    # Build restriction operator
    fn, kwargs = unpack_arg(restrict)
    if fn is None:
        R = P.T
    elif fn == 'air':
        R = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':  # Don't need A^T here
        temp_C = C.T.tocsr()
        R = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'inject':  # Don't need A^T or C^T here
        R = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    else:
        raise ValueError('unknown restriction method (%s)' % restrict)

    # If interp == 'restrict', set P = R^T
    if r_flag:
        P = R.T

    # Optional different interpolation for RAP
    fn, kwargs = unpack_arg(coarse_grid_P)
    if fn == 'standard':
        P_temp = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P_temp = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P_temp = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P_temp = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P_temp = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P_temp = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A):
            temp_A = bsr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = bsr_matrix(P_temp.T)
        else:
            temp_A = csr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = csr_matrix(P_temp.T)
    else:
        P_temp = P

    # Optional different restriction for RAP
    fn, kwargs = unpack_arg(coarse_grid_R)
    if fn == 'air':
        R_temp = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R_temp = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':  # Don't need A^T here
        temp_C = C.T.tocsr()
        R_temp = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'inject':  # Don't need A^T or C^T here
        R_temp = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting,
                                            **kwargs)
            R_temp = R_temp.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting,
                                            **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting,
                                                **kwargs)
            R_temp = R_temp.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting,
                                                **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    else:
        R_temp = R

    # Store relevant information for this level
    if keep:
        levels[-1].C = C  # strength of connection matrix

    levels[-1].P = P  # prolongation operator
    levels[-1].R = R  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    #levels[-1].complexity['RAP'] = mat_mat_complexity(R_temp,A) / float(A.nnz)
    #RA = R_temp * A
    #levels[-1].complexity['RAP'] += mat_mat_complexity(RA,P_temp) / float(A.nnz)
    #A = RA * P_temp

    # RL: RAP = R*(A*P)
    levels[-1].complexity['RAP'] = mat_mat_complexity(A, P_temp) / float(A.nnz)
    AP = A * P_temp
    levels[-1].complexity['RAP'] += mat_mat_complexity(R_temp, AP) / float(
        A.nnz)
    A = R_temp * AP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    A.eliminate_zeros()
    levels.append(multilevel_solver.level())
    levels[-1].A = A
    return 0
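
The standard/distance_two/direct restriction branches above all repeat one
pattern: interpolate on the transposed problem, then transpose back while
preserving A's sparse format.  A hedged helper capturing that pattern
(restriction_via_transpose and interp_fn are illustrative names; any
interpolation routine with the (A, C, splitting) signature would fit):

from scipy.sparse import isspmatrix_bsr

def restriction_via_transpose(interp_fn, A, C, splitting, **kwargs):
    # R = (interp(A^T, C^T, splitting))^T, restored to A's format
    if isspmatrix_bsr(A):
        R = interp_fn(A.T.tobsr(), C.T.tobsr(), splitting, **kwargs)
        return R.T.tobsr()
    R = interp_fn(A.T.tocsr(), C.T.tocsr(), splitting, **kwargs)
    return R.T.tocsr()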
Example #10
def extend_hierarchy(levels, strength, CF, interpolation, restriction, keep):
    """ helper function for local methods """

    A = levels[-1].A

    # Compute the strength-of-connection matrix C, where a larger
    # C[i,j] denotes a stronger coupling between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    levels[-1].complexity['strength'] = kwargs['cost'][0]

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = split.RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = split.PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = split.PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = split.CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = split.CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)

    levels[-1].complexity['CF'] = kwargs['cost'][0]

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    fn, kwargs = unpack_arg(interpolation)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'injection':
        P = injection_interpolation(A, splitting, **kwargs)
    else:
        raise ValueError('unknown interpolation method (%s)' % interpolation)
    levels[-1].complexity['interpolate'] = kwargs['cost'][0]

    # Generate the restriction matrix that maps from the fine-grid to the
    # coarse-grid. Must make sure transpose matrices remain in CSR or BSR
    fn, kwargs = unpack_arg(restriction)
    if isspmatrix_csr(A):
        if restriction == 'galerkin':
            R = P.T.tocsr()
        elif fn == 'standard':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'distance_two':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'direct':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'one_point':         # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'injection':         # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tocsr()
        else:
            raise ValueError('unknown restriction method (%s)' % restriction)
    else: 
        if restriction == 'galerkin':
            R = P.T.tobsr()
        elif fn == 'standard':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'distance_two':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'direct':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'one_point':         # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'injection':         # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            raise ValueError('unknown restriction method (%s)' % restriction)
    
    levels[-1].complexity['restriction'] = kwargs['cost'][0]

    # Store relevant information for this level
    if keep:
        levels[-1].C = C                  # strength of connection matrix

    levels[-1].P = P                  # prolongation operator
    levels[-1].R = R                  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R,A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA,P) / float(A.nnz)
    A = RA * P      # Galerkin operator, Ac = RAP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    # Form next level through Galerkin product
    levels.append(multilevel_solver.level())
    levels[-1].A = A
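
One reason for the explicit .tocsr()/.tobsr() calls after every transpose
above: in SciPy, transposing a csr_matrix yields a csc_matrix, so the format
must be restored before handing the result to kernels that expect CSR or BSR.
A two-line demonstration:

import scipy.sparse as sp

P = sp.random(8, 3, density=0.4, format='csr', random_state=0)
print(type(P.T).__name__)          # csc_matrix
print(type(P.T.tocsr()).__name__)  # csr_matrix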
Example #11
def extend_hierarchy(levels, strength, CF, interpolation, restriction, keep):
    """ helper function for local methods """

    A = levels[-1].A

    # Compute the strength-of-connection matrix C, where a larger
    # C[i,j] denotes a stronger coupling between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    levels[-1].complexity['strength'] = kwargs['cost'][0]

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = split.RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = split.PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = split.PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = split.CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = split.CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)

    levels[-1].complexity['CF'] = kwargs['cost'][0]

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    fn, kwargs = unpack_arg(interpolation)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'injection':
        P = injection_interpolation(A, splitting, **kwargs)
    else:
        raise ValueError('unknown interpolation method (%s)' % interpolation)
    levels[-1].complexity['interpolate'] = kwargs['cost'][0]

    # Generate the restriction matrix that maps from the fine-grid to the
    # coarse-grid. Must make sure transpose matrices remain in CSR or BSR
    fn, kwargs = unpack_arg(restriction)
    if isspmatrix_csr(A):
        if restriction == 'galerkin':
            R = P.T.tocsr()
        elif fn == 'standard':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'distance_two':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'direct':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'one_point':  # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'injection':  # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tocsr()
        else:
            raise ValueError('unknown restriction method (%s)' %
                             restriction)
    else:
        if restriction == 'galerkin':
            R = P.T.tobsr()
        elif fn == 'standard':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'distance_two':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'direct':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'one_point':  # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'injection':  # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            raise ValueError('unknown restriction method (%s)' %
                             restriction)

    levels[-1].complexity['restriction'] = kwargs['cost'][0]

    # Store relevant information for this level
    if keep:
        levels[-1].C = C  # strength of connection matrix

    levels[-1].P = P  # prolongation operator
    levels[-1].R = R  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R, A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA, P) / float(A.nnz)
    A = RA * P  # Galerkin operator, Ac = RAP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    # Form next level through Galerkin product
    levels.append(multilevel_solver.level())
    levels[-1].A = A
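
mat_mat_complexity above estimates the work of a sparse matrix product before
performing it.  A hedged sketch of one standard flop proxy (our own
approximation, not pyamg's routine): for C = A*B in CSR, each nonzero A[i, k]
touches row k of B, so the cost is the sum of nnz(B[k, :]) over the nonzeros
of A.

import numpy as np
import scipy.sparse as sp

def matmat_work(A, B):
    # Flop proxy for A*B: one row of B is visited per nonzero of A.
    A, B = A.tocsr(), B.tocsr()
    row_nnz_B = np.diff(B.indptr)           # nnz per row of B
    return int(row_nnz_B[A.indices].sum())  # A.indices holds the k's

A = sp.random(100, 100, density=0.05, format='csr', random_state=0)
P = sp.random(100, 25, density=0.08, format='csr', random_state=1)
print(matmat_work(A, P) / A.nnz)  # estimated work relative to nnz(A)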
Example #12
def evolution_strength_of_connection(A, B=None, epsilon=4.0, k=2,
                                     proj_type="l2", weighting='diagonal',
                                     symmetrize_measure=True, cost=[0]):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B=None, then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        ODE num time steps, step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    weighting : {string}
        'block', 'diagonal' or 'local' construction of the D-inverse 
        used to precondition A before "evolving" delta-functions.  The
        local option is the cheapest.

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon >= 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B is None:
        Bmat = np.asmatrix(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.asmatrix(B)
    
    # Is matrix A CSR?
    if (not sparse.isspmatrix_csr(A)):
        numPDEs = A.blocksize[0]
        csrflag = False
    else:
        numPDEs = 1
        csrflag = True

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's, have
    # sorted indices and be scaled by D-inverse
    if weighting == 'block': 
        Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
        Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
                                 np.arange(Dinv.shape[0] + 1)),
                                 shape=A.shape)
        Dinv_A = (Dinv * A).tocsr()
        cost[0] += 1
    elif weighting == 'diagonal':
        D = A.diagonal()
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    elif weighting == 'local':
        D = np.abs(A)*np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)
        cost[0] += 1
    else:
        raise ValueError('Unrecognized weighting for Evolution measure')

    A = A.tocsr()
    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    if weighting == 'diagonal' or weighting == 'block':
        # Get spectral radius of Dinv*A, scales the time step size for the ODE
        rho_DinvA = approximate_spectral_radius(Dinv_A)
        cost[0] += 15   # 15 lanczos iterations to approximate spectral radius
    else:
        # Using local weighting, no need for spectral radius
        rho_DinvA = 1.0

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D = A.diagonal()
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    # We transpose the product, so that we can efficiently access
    # the columns in CSR format.  We want the columns (not rows) because 
    # strength is based on the columns of (I - delta_t Dinv A)^k, i.e., 
    # relaxed delta functions
    
    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare
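    # For example, k = 5 gives nsquare = int(log2(5)) = 2 and
    # ninc = 5 - 2**2 = 1: two squarings cover four of the time steps and
    # the remaining step is applied incrementally below.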

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0/rho_DinvA)*Dinv_A)
    Atilde = Atilde.T.tocsr()
    cost[0] += 1

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        JacobiStep = sparse.csr_matrix(Atilde, copy=True)
        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            cost[0] += mat_mat_complexity(Atilde,Atilde)
            Atilde = Atilde*Atilde

        for i in range(ninc):
            cost[0] += mat_mat_complexity(Atilde,JacobiStep)
            Atilde = Atilde*JacobiStep

        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()
        cost[0] += Atilde.nnz / float(A.nnz)

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare-1):
            cost[0] += mat_mat_complexity(Atilde,Atilde)
            Atilde = Atilde*Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)
        cost[0] += mat_mat_complexity(Atilde,mask,incomplete=True) / float(A.nnz)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # We can use either the D-norm and inner product, or the l2-norm and
    # inner product, to solve the constrained min problem.  Using D gives
    # scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an easy
        #   programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling
        cost[0] += Atilde.shape[0] / float(A.nnz)

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))
        cost[0] += 2.0 * Atilde.nnz / float(A.nnz)

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)
        cost[0] += Atilde.nnz / float(A.nnz)
        if np.iscomplexobj(Atilde.data):
            cost[0] += Atilde.nnz / float(A.nnz)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data/data
        cost[0] += Atilde.nnz / float(A.nnz)

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)
        cost[0] += Atilde.nnz / float(A.nnz)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1
                cost[0] += B.shape[0] / float(A.nnz)

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
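        # For example, double precision ('d') selects tol = eps*1e6, about
        # 2.2e-10; entries that small are treated as numerically zero by the
        # strength helper below.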

        # Use constrained min problem to define strength.
        # This function does the same as the NullDim = 1 case, but
        # generalized to multiple near-nullspace candidates.
        # Complexity accounts for computing the block inverse, and
        #   hat{z_i} = B_i*x, hat{z_i} .* hat{z_i},
        #   hat{z_i} = hat{z_i} / z_i, and abs(1.0 - hat{z_i}).
        cost[0] += (Atilde.nnz*(3 + NullDim) +
                    (NullDim**3)*dimen) / float(A.nnz)
        amg_core.evolution_strength_helper(Atilde.data,
                                           Atilde.indptr,
                                           Atilde.indices,
                                           Atilde.shape[0],
                                           np.ravel(np.asarray(B)),
                                           np.ravel(np.asarray(
                                               (D_A * np.conjugate(B)).T)),
                                           np.ravel(np.asarray(BDB)),
                                           BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        cost[0] += Atilde.nnz / float(A.nnz)
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I
    cost[0] += Atilde.shape[0] / float(A.nnz)

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0]*Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks,))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require that small values be weak and
    # large values be strong.  So, we invert the algebraic distances
    # computed here.
    Atilde.data = 1.0/Atilde.data
    cost[0] += Atilde.nnz / float(A.nnz)

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)
    cost[0] += Atilde.nnz / float(A.nnz)
    
    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5*(Atilde + Atilde.T)
        cost[0] += Atilde.nnz / float(A.nnz)

    return Atilde