Example #1
def relax(A, x):
    fn, kwargs = unpack_arg(prepostsmoother, cost=False)
    if fn == 'gauss_seidel':
        gauss_seidel(A, x, np.zeros_like(x),
                     iterations=candidate_iters, sweep='symmetric')
    elif fn == 'gauss_seidel_nr':
        gauss_seidel_nr(A, x, np.zeros_like(x),
                        iterations=candidate_iters, sweep='symmetric')
    elif fn == 'gauss_seidel_ne':
        gauss_seidel_ne(A, x, np.zeros_like(x),
                        iterations=candidate_iters, sweep='symmetric')
    elif fn == 'jacobi':
        jacobi(A, x, np.zeros_like(x), iterations=1,
               omega=1.0 / rho_D_inv_A(A))
    elif fn == 'richardson':
        polynomial(A, x, np.zeros_like(x), iterations=1,
                   coefficients=[1.0/approximate_spectral_radius(A)])
    elif fn == 'gmres':
        x[:] = (gmres(A, np.zeros_like(x), x0=x,
                maxiter=candidate_iters)[0]).reshape(x.shape)
    else:
        raise TypeError('Unrecognized smoother')
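For context, relax is a closure over prepostsmoother and candidate_iters from the enclosing adaptive setup routine (shown in full in Example #2). A minimal, hypothetical driver, assuming unpack_arg and the pyamg relaxation routines used above are in scope:

    import numpy as np
    from pyamg.gallery import poisson

    prepostsmoother = ('gauss_seidel', {'sweep': 'symmetric'})
    candidate_iters = 5                 # free variables relax() reads
    A = poisson((50,), format='csr')    # 1D Poisson test matrix
    x = np.random.rand(A.shape[0], 1)   # random initial candidate
    relax(A, x)                         # x is smoothed in place toward
                                        # algebraically smooth error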
Example #2
def initial_setup_stage(A,
                        symmetry,
                        pdef,
                        candidate_iters,
                        epsilon,
                        max_levels,
                        max_coarse,
                        aggregate,
                        prepostsmoother,
                        smooth,
                        strength,
                        work,
                        B=None):
    """
    Computes a complete aggregation and the first near-nullspace candidate
    following Algorithm 3 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """

    # Define relaxation routine
    def relax(A, x):
        fn, kwargs = unpack_arg(prepostsmoother, cost=False)
        if fn == 'gauss_seidel':
            gauss_seidel(A,
                         x,
                         np.zeros_like(x),
                         iterations=candidate_iters,
                         sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(A,
                   x,
                   np.zeros_like(x),
                   iterations=1,
                   omega=1.0 / rho_D_inv_A(A))
        elif fn == 'richardson':
            polynomial(A,
                       x,
                       np.zeros_like(x),
                       iterations=1,
                       coefficients=[1.0 / approximate_spectral_radius(A)])
        elif fn == 'gmres':
            x[:] = (gmres(A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # flag for skipping steps f-i in step 4
    skip_f_to_i = True

    # step 1
    A_l = A
    if B is None:
        x = sp.rand(A_l.shape[0], 1).astype(A_l.dtype)
        # The following type check matches the usual 'complex' type,
        # but also numpy data types such as 'complex64', 'complex128'
        # and 'complex256'.
        if A_l.dtype.name.startswith('complex'):
            x = x + 1.0j * sp.rand(A_l.shape[0], 1)
    else:
        x = np.array(B, dtype=A_l.dtype)

    # step 2
    relax(A_l, x)
    work[:] += A_l.nnz * candidate_iters * 2
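    # (the factor of 2 counts a symmetric sweep as a forward plus a
    #  backward pass, each roughly one traversal of A's nonzeros)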

    # step 3
    # not advised to stop the iteration here: often the first relaxation pass
    # _is_ good, but the remaining passes are poor
    # if x_A_x/x_A_x_old < epsilon:
    #    # relaxation alone is sufficient
    #    print 'relaxation alone works: %g'%(x_A_x/x_A_x_old)
    #    return x, []

    # step 4
    As = [A]
    xs = [x]
    Ps = []
    AggOps = []
    StrengthOps = []

    while A.shape[0] > max_coarse and max_levels > 1:
        # The real check to break from the while loop is below

        # Begin constructing next level
        fn, kwargs = unpack_arg(strength[len(As) - 1])  # step 4b
        if fn == 'symmetric':
            C_l = symmetric_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
        elif fn == 'classical':
            C_l = classical_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
            if isspmatrix_bsr(A_l):
                C_l = amalgamate(C_l, A_l.blocksize[0])
        elif (fn == 'ode') or (fn == 'evolution'):
            C_l = evolution_strength_of_connection(
                A_l, np.ones((A_l.shape[0], 1), dtype=A.dtype), **kwargs)
        elif fn == 'predefined':
            C_l = kwargs['C'].tocsr()
        elif fn is None:
            C_l = A_l.tocsr()
        else:
            raise ValueError('unrecognized strength of connection method: %s' %
                             str(fn))

        # In SA, strength represents "distance", so we take magnitude of
        # complex values
        if C_l.dtype.name.startswith('complex'):
            C_l.data = np.abs(C_l.data)

        # Create a unified strength framework so that large values represent
        # strong connections and small values represent weak connections
        if (fn == 'ode') or (fn == 'evolution') or (fn == 'energy_based'):
            C_l.data = 1.0 / C_l.data

        # aggregation
        fn, kwargs = unpack_arg(aggregate[len(As) - 1])
        if fn == 'standard':
            AggOp = standard_aggregation(C_l, **kwargs)[0]
        elif fn == 'lloyd':
            AggOp = lloyd_aggregation(C_l, **kwargs)[0]
        elif fn == 'predefined':
            AggOp = kwargs['AggOp'].tocsr()
        else:
            raise ValueError('unrecognized aggregation method %s' % str(fn))

        T_l, x = fit_candidates(AggOp, x)  # step 4c

        fn, kwargs = unpack_arg(smooth[len(As) - 1])  # step 4d
        if fn == 'jacobi':
            P_l = jacobi_prolongation_smoother(A_l, T_l, C_l, x, **kwargs)
        elif fn == 'richardson':
            P_l = richardson_prolongation_smoother(A_l, T_l, **kwargs)
        elif fn == 'energy':
            P_l = energy_prolongation_smoother(A_l, T_l, C_l, x, None,
                                               (False, {}), **kwargs)
        elif fn is None:
            P_l = T_l
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # R should reflect A's structure # step 4e
        if symmetry == 'symmetric':
            A_l = P_l.T.asformat(P_l.format) * A_l * P_l
        elif symmetry == 'hermitian':
            A_l = P_l.H.asformat(P_l.format) * A_l * P_l

        StrengthOps.append(C_l)
        AggOps.append(AggOp)
        Ps.append(P_l)
        As.append(A_l)

        # skip to step 5 as in step 4e
        if (A_l.shape[0] <= max_coarse) or (len(AggOps) + 1 >= max_levels):
            break

        if not skip_f_to_i:
            x_hat = x.copy()  # step 4g
            relax(A_l, x)  # step 4h
            work[:] += A_l.nnz * candidate_iters * 2
            if pdef is True:
                x_A_x = np.dot(np.conjugate(x).T, A_l * x)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l * x_hat)
                err_ratio = (x_A_x / xhat_A_xhat)**(1.0 / candidate_iters)
            else:
                # use A.H A inner-product
                Ax = A_l * x
                # Axhat = A_l * x_hat
                x_A_x = np.dot(np.conjugate(Ax).T, Ax)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l * x_hat)
                err_ratio = (x_A_x / xhat_A_xhat)**(1.0 / candidate_iters)
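            # err_ratio estimates the per-iteration convergence factor of
            # relaxation, (<x, Ax> / <x_hat, A x_hat>)**(1/candidate_iters);
            # a value below epsilon means relaxation alone still converges
            # quickly, so steps f--i can be skipped on later levels.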

            if err_ratio < epsilon:  # step 4i
                # print "sufficient convergence, skipping"
                skip_f_to_i = True
                if x_A_x == 0:
                    x = x_hat  # need to restore x
        else:
            # just carry out relaxation, don't check for convergence
            relax(A_l, x)  # step 4h
            work[:] += 2 * A_l.nnz * candidate_iters

        # store xs for diagnostic use and for use in step 5
        xs.append(x)

    # step 5
    # Extend coarse-level candidate to the finest level
    # --> note that we start with the x from the second coarsest level
    x = xs[-1]
    # make sure that xs[-1] has been relaxed by step 4h, i.e. relax(As[-2], x)
    for lev in range(len(Ps) - 2, -1, -1):  # lev = coarsest ... finest-1
        P = Ps[lev]  # I: lev --> lev+1
        A = As[lev]  # A on lev+1
        x = P * x
        relax(A, x)
        work[:] += A.nnz * candidate_iters * 2

    # Set predefined strength of connection and aggregation
    if len(AggOps) > 1:
        aggregate = [('predefined', {
            'AggOp': AggOps[i]
        }) for i in range(len(AggOps))]
        strength = [('predefined', {
            'C': StrengthOps[i]
        }) for i in range(len(StrengthOps))]

    return x, aggregate, strength  # first candidate
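A hypothetical driver for initial_setup_stage (values illustrative; in pyamg this call is normally made by adaptive_sa_solver, which first levelizes the per-level option lists, as in Example #3):

    import numpy as np
    from pyamg.gallery import poisson

    A = poisson((100,), format='csr')
    work = np.zeros((1,))
    nlev = 10
    x, aggregate, strength = initial_setup_stage(
        A, symmetry='hermitian', pdef=True, candidate_iters=5, epsilon=0.1,
        max_levels=nlev, max_coarse=10,
        aggregate=['standard'] * (nlev - 1),
        prepostsmoother=('gauss_seidel', {'sweep': 'symmetric'}),
        smooth=[('jacobi', {})] * (nlev - 1),
        strength=['symmetric'] * (nlev - 1),
        work=work)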
Example #3
def adaptive_sa_solver(A,
                       B=None,
                       symmetry='hermitian',
                       pdef=True,
                       num_candidates=1,
                       candidate_iters=5,
                       improvement_iters=0,
                       epsilon=0.1,
                       max_levels=10,
                       max_coarse=10,
                       aggregate='standard',
                       prepostsmoother=('gauss_seidel', {
                           'sweep': 'symmetric'
                       }),
                       smooth=('jacobi', {}),
                       strength='symmetric',
                       coarse_solver='pinv2',
                       eliminate_local=(False, {
                           'Ca': 1.0
                       }),
                       keep=False,
                       **kwargs):
    """
    Create a multilevel solver using Adaptive Smoothed Aggregation (aSA)

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Square matrix in CSR or BSR format
    B : {None, n x m dense matrix}
        If a matrix, then this forms the basis for the first m candidates.
        Also in this case, the initial setup stage is skipped, because this
        provides the first candidate(s).  If None, then a random initial guess
        and relaxation are used to inform the initial candidate.
    symmetry : {string}
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        Note that for the strictly real case, these two options are the same
        Note that this flag does not denote definiteness of the operator
    pdef : {bool}
        True or False, whether A is known to be positive definite.
    num_candidates : {integer} : default 1
        Number of near-nullspace candidates to generate
    candidate_iters : {integer} : default 5
        Number of smoothing passes/multigrid cycles used at each level of
        the adaptive setup phase
    improvement_iters : {integer} : default 0
        Number of times each candidate is improved
    epsilon : {float} : default 0.1
        Target convergence factor
    max_levels : {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : {integer} : default 10
        Maximum number of variables permitted on the coarse grid.
    prepostsmoother : {string or tuple}
        Pre- and post-smoother used in the adaptive method
    strength : ['symmetric', 'classical', 'evolution',
                ('predefined', {'C': csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system.  See smoothed_aggregation_solver(...) documentation.
    aggregate : ['standard', 'lloyd', 'naive',
                 ('predefined', {'AggOp': csr_matrix})]
        Method used to aggregate nodes.  See smoothed_aggregation_solver(...)
        documentation.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used to smooth the tentative prolongator.  See
        smoothed_aggregation_solver(...) documentation
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary
        of arguments to be passed to fn.
    eliminate_local : {tuple}
        Length 2 tuple.  If the first entry is True, then candidates are
        eliminated where they are not needed locally, with the second entry
        a dict of arguments for the local elimination routine.  Given the
        rigid sparse data structures, this doesn't help much, if at all,
        with complexity.  It's more of a diagnostic utility.
    keep : {bool} : default False
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics.  For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Returns
    -------
    [asa_solver, work] : list
        asa_solver : multilevel_solver
            Smoothed aggregation solver with adaptively generated candidates.
        work : float
            The "work" required to generate the solver: the total cost of
            the relaxation performed during setup, relative to the fine-grid
            operator.  The relaxation method is assumed to be symmetric
            Gauss-Seidel.

    Notes
    -----

    - Unlike the standard Smoothed Aggregation (SA) method, adaptive SA does
      not require knowledge of near-nullspace candidate vectors.  Instead, an
      adaptive procedure computes one or more candidates 'from scratch'.  This
      approach is useful when no candidates are known or the candidates have
      been invalidated due to changes to matrix A.

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.aggregation import adaptive_sa_solver
    >>> import numpy as np
    >>> A = stencil_grid([[-1,-1,-1],[-1,8.0,-1],[-1,-1,-1]],
    ...                  (31,31), format='csr')
    >>> [asa, work] = adaptive_sa_solver(A, num_candidates=1)
    >>> residuals = []
    >>> x = asa.solve(b=np.ones((A.shape[0],)), x0=np.ones((A.shape[0],)),
    ...               residuals=residuals)

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf

    """

    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
        except BaseException:
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Track work in terms of relaxation
    work = np.zeros((1, ))

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Develop initial candidate(s).  Note that any predefined aggregation is
    # preserved.
    if B is None:
        B, aggregate, strength =\
            initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                                max_levels, max_coarse, aggregate,
                                prepostsmoother, smooth, strength, work)
        # Normalize B
        B = (1.0 / norm(B, 'inf')) * B
        num_candidates -= 1
    else:
        # Otherwise, use predefined candidates
        num_candidates -= B.shape[1]
        # Generate Aggregation and Strength Operators (the brute force way)
        sa = smoothed_aggregation_solver(A,
                                         B=B,
                                         symmetry=symmetry,
                                         presmoother=prepostsmoother,
                                         postsmoother=prepostsmoother,
                                         smooth=smooth,
                                         strength=strength,
                                         max_levels=max_levels,
                                         max_coarse=max_coarse,
                                         aggregate=aggregate,
                                         coarse_solver=coarse_solver,
                                         improve_candidates=None,
                                         keep=True,
                                         **kwargs)
        if len(sa.levels) > 1:
            # Set strength-of-connection and aggregation
            aggregate = [('predefined', {
                'AggOp': sa.levels[i].AggOp.tocsr()
            }) for i in range(len(sa.levels) - 1)]
            strength = [('predefined', {
                'C': sa.levels[i].C.tocsr()
            }) for i in range(len(sa.levels) - 1)]

    # Develop additional candidates
    for i in range(num_candidates):
        x = general_setup_stage(
            smoothed_aggregation_solver(A,
                                        B=B,
                                        symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        strength=strength,
                                        max_levels=max_levels,
                                        max_coarse=max_coarse,
                                        aggregate=aggregate,
                                        coarse_solver=coarse_solver,
                                        improve_candidates=None,
                                        keep=True,
                                        **kwargs), symmetry, candidate_iters,
            prepostsmoother, smooth, eliminate_local, coarse_solver, work)

        # Normalize x and add to candidate list
        x = x / norm(x, 'inf')
        if np.isinf(x[0]) or np.isnan(x[0]):
            raise ValueError('Adaptive candidate is all 0.')
        B = np.hstack((B, x.reshape(-1, 1)))

    # Improve candidates
    if B.shape[1] > 1 and improvement_iters > 0:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        for i in range(improvement_iters):
            for j in range(B.shape[1]):
                # Run a V-cycle built on everything except candidate j, while
                # using candidate j as the initial guess
                x0 = B[:, 0]
                B = B[:, 1:]
                sa_temp =\
                    smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                                presmoother=prepostsmoother,
                                                postsmoother=prepostsmoother,
                                                smooth=smooth,
                                                strength=strength,
                                                max_levels=max_levels,
                                                max_coarse=max_coarse,
                                                aggregate=aggregate,
                                                coarse_solver=coarse_solver,
                                                improve_candidates=None,
                                                keep=True, **kwargs)
                x = sa_temp.solve(b,
                                  x0=x0,
                                  tol=float(np.finfo(float).tiny),
                                  maxiter=candidate_iters,
                                  cycle='V')
                work[:] += 2 * sa_temp.operator_complexity() *\
                    sa_temp.levels[0].A.nnz * candidate_iters

                # Apply local elimination
                elim, elim_kwargs = unpack_arg(eliminate_local)
                if elim is True:
                    x = x / norm(x, 'inf')
                    eliminate_local_candidates(x, sa_temp.levels[0].AggOp, A,
                                               sa_temp.levels[0].T,
                                               **elim_kwargs)

                # Normalize x and add to candidate list
                x = x / norm(x, 'inf')
                if np.isinf(x[0]) or np.isnan(x[0]):
                    raise ValueError('Adaptive candidate is all 0.')
                B = np.hstack((B, x.reshape(-1, 1)))

    elif improvement_iters > 0:
        # Special case for improving a single candidate
        max_levels = len(aggregate) + 1
        max_coarse = 0
        for i in range(improvement_iters):
            B, aggregate, strength =\
                initial_setup_stage(A, symmetry, pdef, candidate_iters,
                                    epsilon, max_levels, max_coarse,
                                    aggregate, prepostsmoother, smooth,
                                    strength, work, B=B)
            # Normalize B
            B = (1.0 / norm(B, 'inf')) * B

    # Return smoother. Note, solver parameters are passed in via params
    # argument to be stored in final solver hierarchy and used to estimate
    # complexity.
    return [
        smoothed_aggregation_solver(A,
                                    B=B,
                                    symmetry=symmetry,
                                    presmoother=prepostsmoother,
                                    postsmoother=prepostsmoother,
                                    smooth=smooth,
                                    strength=strength,
                                    max_levels=max_levels,
                                    max_coarse=max_coarse,
                                    aggregate=aggregate,
                                    coarse_solver=coarse_solver,
                                    improve_candidates=None,
                                    keep=keep,
                                    **kwargs), work[0] / A.nnz
    ]
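The doctest above exercises the default path, where the first candidate is generated from scratch. A hedged sketch of the alternative path, seeding the solver with user-supplied candidates (values illustrative):

    import numpy as np
    from pyamg.gallery import poisson
    from pyamg.aggregation import adaptive_sa_solver

    A = poisson((31, 31), format='csr')
    B = np.ones((A.shape[0], 1))       # seed candidate: the constant vector
    [asa, work] = adaptive_sa_solver(A, B=B, num_candidates=2,
                                     improvement_iters=1)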
Example #4
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
                     diagonal_dominance=False, keep=True):
    """Service routine to implement the strength of connection, aggregation,
    tentative prolongation construction, and prolongation smoothing.  Called by
    smoothed_aggregation_solver.
    """

    A = levels[-1].A
    B = levels[-1].B
    if A.symmetry == "nonsymmetric":
        AH = A.H.asformat(A.format)
        BH = levels[-1].BH

    # Compute the strength-of-connection matrix C, where larger
    # C[i, j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength[len(levels)-1])
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        if 'B' in kwargs:
            C = evolution_strength_of_connection(A, **kwargs)
        else:
            C = evolution_strength_of_connection(A, B, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'predefined':
        C = kwargs['C'].tocsr()
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A.tocsr()
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))
    
    levels[-1].complexity['strength'] = kwargs['cost'][0]

    # Avoid coarsening diagonally dominant rows
    flag, kwargs = unpack_arg(diagonal_dominance)
    if flag:
        C = eliminate_diag_dom_nodes(A, C, **kwargs)
        levels[-1].complexity['diag_dom'] = kwargs['cost'][0]

    # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
    # AggOp is a boolean matrix, where the sparsity pattern for the k-th column
    # denotes the fine-grid nodes agglomerated into k-th coarse-grid node.
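    # For example, with six fine nodes aggregated in consecutive pairs,
    # AggOp is 6x3 and AggOp[i, k] = 1 iff fine node i lies in aggregate k.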
    fn, kwargs = unpack_arg(aggregate[len(levels)-1])
    if fn == 'standard':
        AggOp, Cnodes = standard_aggregation(C, **kwargs)
    elif fn == 'naive':
        AggOp, Cnodes = naive_aggregation(C, **kwargs)
    elif fn == 'lloyd':
        AggOp, Cnodes = lloyd_aggregation(C, **kwargs)
    elif fn == 'predefined':
        AggOp = kwargs['AggOp'].tocsr()
        Cnodes = kwargs['Cnodes']
    else:
        raise ValueError('unrecognized aggregation method %s' % str(fn))
    
    levels[-1].complexity['aggregation'] = kwargs['cost'][0] * (float(C.nnz)/A.nnz)

    # Improve near nullspace candidates by relaxing on A B = 0
    temp_cost = [0.0]
    fn, kwargs = unpack_arg(improve_candidates[len(levels)-1], cost=False)
    if fn is not None:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        B = relaxation_as_linear_operator((fn, kwargs), A, b, temp_cost) * B
        levels[-1].B = B
        if A.symmetry == "nonsymmetric":
            BH = relaxation_as_linear_operator((fn, kwargs), AH, b, temp_cost) * BH
            levels[-1].BH = BH

    levels[-1].complexity['candidates'] = temp_cost[0] * B.shape[1]

    # Compute the tentative prolongator, T, which is a tentative interpolation
    # matrix from the coarse-grid to the fine-grid.  T exactly interpolates
    # B_fine[:, 0:blocksize(A)] = T B_coarse[:, 0:blocksize(A)].
    # Orthogonalization complexity ~ 2nk^2, k = blocksize(A).
    temp_cost = [0.0]
    T, dummy = fit_candidates(AggOp, B[:, 0:blocksize(A)], cost=temp_cost)
    del dummy
    if A.symmetry == "nonsymmetric":
        TH, dummyH = fit_candidates(AggOp, BH[:, 0:blocksize(A)], cost=temp_cost)
        del dummyH

    levels[-1].complexity['tentative'] = temp_cost[0]/A.nnz
    
    # Create necessary root node matrices
    Cpt_params = (True, get_Cpt_params(A, Cnodes, AggOp, T))
    T = scale_T(T, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
    levels[-1].complexity['tentative'] += T.nnz / float(A.nnz)
    if A.symmetry == "nonsymmetric":
        TH = scale_T(TH, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
        levels[-1].complexity['tentative'] += TH.nnz / float(A.nnz)

    # Set coarse grid near nullspace modes as injected fine grid near
    # null-space modes
    B = Cpt_params[1]['P_I'].T*levels[-1].B
    if A.symmetry == "nonsymmetric":
        BH = Cpt_params[1]['P_I'].T*levels[-1].BH

    # Smooth the tentative prolongator, so that its accuracy is greatly
    # improved for algebraically smooth error.
    fn, kwargs = unpack_arg(smooth[len(levels)-1])
    if fn == 'energy':
        P = energy_prolongation_smoother(A, T, C, B, levels[-1].B,
                                         Cpt_params=Cpt_params, **kwargs)
    elif fn is None:
        P = T
    else:
        raise ValueError('unrecognized prolongation smoother method %s' %
                         str(fn))

    levels[-1].complexity['smooth_P'] = kwargs['cost'][0]

    # Compute the restriction matrix R, which interpolates from the fine-grid
    # to the coarse-grid.  If A is nonsymmetric, then R must be constructed
    # based on A.H.  Otherwise R = P.H or P.T.
    symmetry = A.symmetry
    if symmetry == 'hermitian':
        R = P.H
    elif symmetry == 'symmetric':
        R = P.T
    elif symmetry == 'nonsymmetric':
        fn, kwargs = unpack_arg(smooth[len(levels)-1])
        if fn == 'energy':
            R = energy_prolongation_smoother(AH, TH, C, BH, levels[-1].BH,
                                             Cpt_params=Cpt_params, **kwargs)
            R = R.H
            levels[-1].complexity['smooth_R'] = kwargs['cost'][0]
        elif fn is None:
            R = T.H
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

    if keep:
        levels[-1].C = C                        # strength of connection matrix
        levels[-1].AggOp = AggOp                # aggregation operator
        levels[-1].T = T                        # tentative prolongator
        levels[-1].Fpts = Cpt_params[1]['Fpts'] # Fpts
        levels[-1].P_I = Cpt_params[1]['P_I']   # Injection operator
        levels[-1].I_F = Cpt_params[1]['I_F']   # Identity on F-pts
        levels[-1].I_C = Cpt_params[1]['I_C']   # Identity on C-pts

    levels[-1].P = P                            # smoothed prolongator
    levels[-1].R = R                            # restriction operator
    levels[-1].Cpts = Cpt_params[1]['Cpts']     # Cpts (i.e., rootnodes)

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R,A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA,P) / float(A.nnz)
    A = RA * P      # Galerkin operator, Ac = RAP
    A.symmetry = symmetry

    levels.append(multilevel_solver.level())
    levels[-1].A = A
    levels[-1].B = B                          # right near nullspace candidates

    if A.symmetry == "nonsymmetric":
        levels[-1].BH = BH                   # left near nullspace candidates
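The coarse-grid operator formed above is the Galerkin triple product Ac = R * A * P. A self-contained toy sketch of that product with scipy (matrices illustrative, not the solver's internals):

    import numpy as np
    from scipy.sparse import csr_matrix

    A = csr_matrix(np.array([[2., -1., 0.], [-1., 2., -1.], [0., -1., 2.]]))
    P = csr_matrix(np.array([[1., 0.], [0.5, 0.5], [0., 1.]]))  # toy prolongator
    R = P.T                  # symmetric case: R = P^T
    Ac = (R @ A) @ P         # 2x2 Galerkin coarse-grid operator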
Example #5
def extend_hierarchy(levels, strength, CF, interp, restrict, filter_operator,
                     coarse_grid_P, coarse_grid_R, keep):
    """ helper function for local methods """

    # Filter operator. Need to keep the original matrix on the finest level
    # for computing residuals
    if (filter_operator is not None) and (filter_operator[1] != 0): 
        if len(levels) == 1:
            A = deepcopy(levels[-1].A)
        else:
            A = levels[-1].A
        filter_matrix_rows(A, filter_operator[1], diagonal=True, lump=filter_operator[0])
    else:
        A = levels[-1].A

    # Check if matrix was filtered to be diagonal --> coarsest grid
    if A.nnz == A.shape[0]:
        return 1

    # Zero initial complexities for strength, splitting and interpolation
    levels[-1].complexity['CF'] = 0.0
    levels[-1].complexity['strength'] = 0.0
    levels[-1].complexity['interpolate'] = 0.0

    # Compute the strength-of-connection matrix C, where larger
    # C[i,j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))
    levels[-1].complexity['strength'] += kwargs['cost'][0] * A.nnz / float(A.nnz)

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    elif fn == 'weighted_matching':
        splitting, soc = weighted_matching(C, **kwargs)
        if soc is not None:
            C = soc
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)
    levels[-1].complexity['CF'] += kwargs['cost'][0] * C.nnz / float(A.nnz)
    temp = np.sum(splitting)
    if (temp == len(splitting)) or (temp == 0):
        return 1

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    r_flag = False
    fn, kwargs = unpack_arg(interp)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'scaledAfc':
        P = scaled_Afc_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A):
            temp_A = bsr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = bsr_matrix(P.T)
        else:
            temp_A = csr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = csr_matrix(P.T)
    elif fn == 'restrict':
        r_flag = True
    else:
        raise ValueError('unknown interpolation method (%s)' % interp)
    levels[-1].complexity['interpolate'] += kwargs['cost'][0] * A.nnz / float(A.nnz)

    # Build restriction operator
    fn, kwargs = unpack_arg(restrict)
    if fn is None:
        R = P.T
    elif fn == 'air':
        R = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':         # Don't need A^T here
        temp_C = C.T.tocsr()
        R = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'inject':            # Don't need A^T or C^T here
        R = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()        
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    else:
        raise ValueError('unknown restriction method (%s)' % restrict)

    # If interpolation was specified as 'restrict', set P = R^T
    if r_flag:
        P = R.T

    # Optional different interpolation for RAP
    fn, kwargs = unpack_arg(coarse_grid_P)
    if fn == 'standard':
        P_temp = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P_temp = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P_temp = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P_temp = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P_temp = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P_temp = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A): 
            temp_A = bsr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = bsr_matrix(P_temp.T)
        else:
            temp_A = csr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = csr_matrix(P_temp.T)
    else:
        P_temp = P

    # Optional different restriction for RAP
    fn, kwargs = unpack_arg(coarse_grid_R)
    if fn == 'air':
        R_temp = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R_temp = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':         # Don't need A^T here
        temp_C = C.T.tocsr()
        R_temp = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'inject':            # Don't need A^T or C^T here
        R_temp = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else: 
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()        
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    else:
        R_temp = R

    # Store relevant information for this level
    if keep:
        levels[-1].C = C              # strength of connection matrix

    levels[-1].P = P                  # prolongation operator
    levels[-1].R = R                  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator as R * (A * P), and track its complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(A, P_temp) / float(A.nnz)
    AP = A * P_temp
    levels[-1].complexity['RAP'] += mat_mat_complexity(R_temp, AP) / float(A.nnz)
    A = R_temp * AP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    A.eliminate_zeros()
    levels.append(multilevel_solver.level())
    levels[-1].A = A
    return 0
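The 0/1 return value lets the caller decide whether to keep coarsening. A hypothetical driver loop (max_levels, max_coarse, and the option arguments are illustrative):

    while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
        if extend_hierarchy(levels, strength, CF, interp, restrict,
                            filter_operator, coarse_grid_P, coarse_grid_R,
                            keep=True):
            break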
Example #6
def change_smoothers(ml, presmoother, postsmoother):
    '''
    Initialize pre- and post- smoothers throughout a multilevel_solver, with
    the option of having different smoothers at different levels

    For each level of the multilevel_solver 'ml' (except the coarsest level),
    initialize the .presmoother() and .postsmoother() methods used in the
    multigrid cycle.

    Parameters
    ----------
    ml : {pyamg multilevel hierarchy}
        Data structure that stores the multigrid hierarchy.
    presmoother : {None, string, tuple, list}
        presmoother can be (1) the name of a supported smoother, e.g.
        "gauss_seidel", (2) a tuple of the form ('method','opts') where
        'method' is the name of a supported smoother and 'opts' a dict of
        keyword arguments to the smoother, or (3) a list of instances of
        options 1 or 2.  See the Examples section for illustrations of the
        format.

        If presmoother is a list, presmoother[i] determines the smoothing
        strategy for level i.  Else, presmoother defines the same strategy
        for all levels.

        If len(presmoother) < len(ml.levels), then
        presmoother[-1] is used for all remaining levels

        If len(presmoother) > len(ml.levels), then
        the remaining smoothing strategies are ignored

    postsmoother : {string, tuple, list}
        Defines postsmoother in identical fashion to presmoother

    Returns
    -------
    ml changed in place
    ml.levels[i].smoothers['presmoother']   <===  presmoother[i]
    ml.levels[i].smoothers['postsmoother']  <===  postsmoother[i]
    ml.symmetric_smoothing is marked True/False depending on whether
        the smoothing scheme is symmetric.

    Notes
    -----
    - Parameter 'omega' of the Jacobi, Richardson, and jacobi_ne
      methods is scaled by the spectral radius of the matrix on
      each level.  Therefore 'omega' should be in the interval (0,2).
    - Parameter 'withrho' (default: True) controls whether the omega is
      rescaled by the spectral radius in jacobi, block_jacobi, and jacobi_ne
    - Initializing the smoothers after the hierarchy has been set up allows
      for "algebraically" directed relaxation, such as strength_based_schwarz,
      which uses only the strong connections of a degree-of-freedom to define
      overlapping regions
    - Available smoother methods::

        gauss_seidel
        block_gauss_seidel
        jacobi
        block_jacobi
        richardson
        sor
        chebyshev
        gauss_seidel_nr
        gauss_seidel_ne
        jacobi_ne
        cg
        gmres
        cgne
        cgnr
        schwarz
        strength_based_schwarz
        None

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation import smoothed_aggregation_solver
    >>> from pyamg.relaxation.smoothing import change_smoothers
    >>> from pyamg.util.linalg import norm
    >>> from numpy.random import rand
    >>> A = poisson((10,10), format='csr')
    >>> b = rand(A.shape[0],)
    >>> ml = smoothed_aggregation_solver(A, max_coarse=10)
    >>> #
    >>> # Set all levels to use gauss_seidel's defaults
    >>> smoothers = 'gauss_seidel'
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=smoothers)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    >>> #
    >>> # Set all levels to use three iterations of gauss_seidel's defaults
    >>> smoothers = ('gauss_seidel', {'iterations' : 3})
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=None)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    >>> #
    >>> # Set level 0 to use gauss_seidel's defaults, and all
    >>> # subsequent levels to use 5 iterations of cgnr
    >>> smoothers = ['gauss_seidel', ('cgnr', {'maxiter' : 5})]
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=smoothers)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
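    >>> #
    >>> # Illustrative: two iterations of weighted Jacobi on all levels
    >>> # ('omega' is rescaled by the spectral radius unless withrho=False)
    >>> smoothers = ('jacobi', {'omega': 4.0/3.0, 'iterations': 2})
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=smoothers)
    >>> residuals = []
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)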
    '''

    ml.symmetric_smoothing = True

    # interpret arguments into list
    if isinstance(presmoother, (str, tuple)) or (presmoother is None):
        presmoother = [presmoother]
    elif not isinstance(presmoother, list):
        raise ValueError('Unrecognized presmoother')

    if isinstance(postsmoother, (str, tuple)) or (postsmoother is None):
        postsmoother = [postsmoother]
    elif not isinstance(postsmoother, list):
        raise ValueError('Unrecognized postsmoother')

    # set ml.levels[i].presmoother = presmoother[i],
    #     ml.levels[i].postsmoother = postsmoother[i]
    fn1 = None  # Predefine to keep scope beyond first loop
    fn2 = None
    kwargs1 = {}
    kwargs2 = {}
    min_len = min(len(presmoother), len(postsmoother), len(ml.levels[:-1]))
    same = (len(presmoother) == len(postsmoother))
    for i in range(0, min_len):
        # unpack presmoother[i]
        fn1, kwargs1 = unpack_arg(presmoother[i], cost=False)
        # get function handle
        try:
            setup_presmoother = eval('setup_' + str(fn1))
        except NameError:
            raise NameError("invalid presmoother method: ", fn1)
        ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)

        # unpack postsmoother[i]
        fn2, kwargs2 = unpack_arg(postsmoother[i], cost=False)
        # get function handle
        try:
            setup_postsmoother = eval('setup_' + str(fn2))
        except NameError:
            raise NameError("invalid postsmoother method: ", fn2)
        ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)

        # Save tuples in ml to check cycle complexity
        ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
        ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]

        # Check if symmetric smoothing scheme
        it1 = kwargs1.get('iterations', DEFAULT_NITER)
        it2 = kwargs2.get('iterations', DEFAULT_NITER)
        if (it1 != it2):
            ml.symmetric_smoothing = False
        elif (fn1 != fn2):
            if ((fn1 == 'CF_jacobi' and fn2 == 'FC_jacobi') or \
                (fn1 == 'CF_block_jacobi' and fn2 == 'FC_block_jacobi')):
                fit1 = kwargs1.get('F_iterations', DEFAULT_NITER)
                fit2 = kwargs2.get('F_iterations', DEFAULT_NITER)
                cit1 = kwargs1.get('C_iterations', DEFAULT_NITER)
                cit2 = kwargs2.get('C_iterations', DEFAULT_NITER)
                if (fit1 != fit2) or (cit1 != cit2):
                    ml.symmetric_smoothing = False
            else:
                ml.symmetric_smoothing = False
        elif fn1 in KRYLOV_RELAXATION or fn2 in KRYLOV_RELAXATION:
            ml.symmetric_smoothing = False
        elif fn1 not in SYMMETRIC_RELAXATION:
            if ('CF' in fn1) or ('FC' in fn1):
                ml.symmetric_smoothing = False
            else:
                sweep1 = kwargs1.get('sweep', DEFAULT_SWEEP)
                sweep2 = kwargs2.get('sweep', DEFAULT_SWEEP)
                if not ((sweep1 == 'forward' and sweep2 == 'backward') or
                        (sweep1 == 'backward' and sweep2 == 'forward') or
                        (sweep1 == 'symmetric' and sweep2 == 'symmetric')):
                    ml.symmetric_smoothing = False

    if len(presmoother) < len(postsmoother):
        mid_len = min(len(postsmoother), len(ml.levels[:-1]))
        for i in range(min_len, mid_len):
            # Set up presmoother
            ml.levels[i].presmoother = setup_presmoother(
                ml.levels[i], **kwargs1)

            # unpack postsmoother[i]
            fn2, kwargs2 = unpack_arg(postsmoother[i], cost=False)
            # get function handle
            try:
                setup_postsmoother = eval('setup_' + str(fn2))
            except NameError:
                raise NameError("invalid postsmoother method: ", fn2)
            ml.levels[i].postsmoother = setup_postsmoother(
                ml.levels[i], **kwargs2)

            # Save tuples in ml to check cycle complexity
            ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
            ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]

            # Check if symmetric smoothing scheme
            it1 = kwargs1.get('iterations', DEFAULT_NITER)
            it2 = kwargs2.get('iterations', DEFAULT_NITER)
            if (it1 != it2):
                ml.symmetric_smoothing = False
            elif (fn1 != fn2):
                if ((fn1 == 'CF_jacobi' and fn2 == 'FC_jacobi') or \
                    (fn1 == 'CF_block_jacobi' and fn2 == 'FC_block_jacobi')):
                    fit1 = kwargs1.get('F_iterations', DEFAULT_NITER)
                    fit2 = kwargs2.get('F_iterations', DEFAULT_NITER)
                    cit1 = kwargs1.get('C_iterations', DEFAULT_NITER)
                    cit2 = kwargs2.get('C_iterations', DEFAULT_NITER)
                    if (fit1 != fit2) or (cit1 != cit2):
                        ml.symmetric_smoothing = False
                else:
                    ml.symmetric_smoothing = False
            elif fn1 in KRYLOV_RELAXATION or fn2 in KRYLOV_RELAXATION:
                ml.symmetric_smoothing = False
            elif fn1 not in SYMMETRIC_RELAXATION:
                if ('CF' in fn1) or ('FC' in fn1):
                    ml.symmetric_smoothing = False
                else:
                    sweep1 = kwargs1.get('sweep', DEFAULT_SWEEP)
                    sweep2 = kwargs2.get('sweep', DEFAULT_SWEEP)
                    if not ((sweep1 == 'forward' and sweep2 == 'backward') or
                            (sweep1 == 'backward' and sweep2 == 'forward') or
                            (sweep1 == 'symmetric' and sweep2 == 'symmetric')):
                        ml.symmetric_smoothing = False

    elif len(presmoother) > len(postsmoother):
        mid_len = min(len(presmoother), len(ml.levels[:-1]))
        for i in range(min_len, mid_len):
            # unpack presmoother[i]
            fn1, kwargs1 = unpack_arg(presmoother[i], cost=False)
            # get function handle
            try:
                setup_presmoother = eval('setup_' + str(fn1))
            except NameError:
                raise NameError("invalid presmoother method: ", fn1)
            ml.levels[i].presmoother = setup_presmoother(
                ml.levels[i], **kwargs1)

            # Set up postsmoother
            ml.levels[i].postsmoother = setup_postsmoother(
                ml.levels[i], **kwargs2)

            # Save tuples in ml to check cycle complexity
            ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
            ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]

            # Check if symmetric smoothing scheme
            it1 = kwargs1.get('iterations', DEFAULT_NITER)
            it2 = kwargs2.get('iterations', DEFAULT_NITER)
            if (it1 != it2):
                ml.symmetric_smoothing = False
            elif (fn1 != fn2):
                if ((fn1 == 'CF_jacobi' and fn2 == 'FC_jacobi') or \
                    (fn1 == 'CF_block_jacobi' and fn2 == 'FC_block_jacobi')):
                    fit1 = kwargs1.get('F_iterations', DEFAULT_NITER)
                    fit2 = kwargs2.get('F_iterations', DEFAULT_NITER)
                    cit1 = kwargs1.get('C_iterations', DEFAULT_NITER)
                    cit2 = kwargs2.get('C_iterations', DEFAULT_NITER)
                    if (fit1 != fit2) or (cit1 != cit2):
                        ml.symmetric_smoothing = False
                else:
                    ml.symmetric_smoothing = False
            elif fn1 in KRYLOV_RELAXATION or fn2 in KRYLOV_RELAXATION:
                ml.symmetric_smoothing = False
            elif fn1 not in SYMMETRIC_RELAXATION:
                if ('CF' in fn1) or ('FC' in fn1):
                    ml.symmetric_smoothing = False
                else:
                    sweep1 = kwargs1.get('sweep', DEFAULT_SWEEP)
                    sweep2 = kwargs2.get('sweep', DEFAULT_SWEEP)
                    if not ((sweep1 == 'forward' and sweep2 == 'backward') or
                            (sweep1 == 'backward' and sweep2 == 'forward') or
                            (sweep1 == 'symmetric' and sweep2 == 'symmetric')):
                        ml.symmetric_smoothing = False

    else:
        mid_len = min_len

    # Fill in remaining levels
    for i in range(mid_len, len(ml.levels[:-1])):
        ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)
        ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)
        ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
        ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]
Example No. 8
0
def initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                        max_levels, max_coarse, aggregate, prepostsmoother,
                        smooth, strength, work, B=None):
    """
    Computes a complete aggregation and the first near-nullspace candidate
    following Algorithm 3 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """

    # Define relaxation routine
    def relax(A, x):
        fn, kwargs = unpack_arg(prepostsmoother, cost=False)
        if fn == 'gauss_seidel':
            gauss_seidel(A, x, np.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(A))
        elif fn == 'richardson':
            polynomial(A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(A)])
        elif fn == 'gmres':
            x[:] = (gmres(A, np.zeros_like(x), x0=x,
                    maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # flag for skipping steps f-i in step 4
    skip_f_to_i = True

    # step 1
    A_l = A
    if B is None:
        x = np.random.rand(A_l.shape[0], 1).astype(A_l.dtype)
        # The following type check matches the usual 'complex' type,
        # but also numpy data types such as 'complex64', 'complex128'
        # and 'complex256'.
        if A_l.dtype.name.startswith('complex'):
            x = x + 1.0j*np.random.rand(A_l.shape[0], 1)
    else:
        x = np.array(B, dtype=A_l.dtype)

    # step 2
    relax(A_l, x)
    work[:] += A_l.nnz * candidate_iters*2

    # step 3
    # not advised to stop the iteration here: often the first relaxation pass
    # _is_ good, but the remaining passes are poor
    # if x_A_x/x_A_x_old < epsilon:
    #    # relaxation alone is sufficient
    #    print('relaxation alone works: %g' % (x_A_x/x_A_x_old))
    #    return x, []

    # step 4
    As = [A]
    xs = [x]
    Ps = []
    AggOps = []
    StrengthOps = []

    while A.shape[0] > max_coarse and max_levels > 1:
        # The real check to break from the while loop is below

        # Begin constructing next level
        fn, kwargs = unpack_arg(strength[len(As)-1])  # step 4b
        if fn == 'symmetric':
            C_l = symmetric_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
        elif fn == 'classical':
            C_l = classical_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
            if isspmatrix_bsr(A_l):
                C_l = amalgamate(C_l, A_l.blocksize[0])
        elif (fn == 'ode') or (fn == 'evolution'):
            C_l = evolution_strength_of_connection(A_l,
                                                   np.ones(
                                                       (A_l.shape[0], 1),
                                                       dtype=A.dtype),
                                                   **kwargs)
        elif fn == 'predefined':
            C_l = kwargs['C'].tocsr()
        elif fn is None:
            C_l = A_l.tocsr()
        else:
            raise ValueError('unrecognized strength of connection method: %s' %
                             str(fn))

        # In SA, strength represents "distance", so we take magnitude of
        # complex values
        if C_l.dtype.name.startswith('complex'):
            C_l.data = np.abs(C_l.data)

        # Create a unified strength framework so that large values represent
        # strong connections and small values represent weak connections
        if (fn == 'ode') or (fn == 'evolution') or (fn == 'energy_based'):
            C_l.data = 1.0 / C_l.data

        # aggregation
        fn, kwargs = unpack_arg(aggregate[len(As) - 1])
        if fn == 'standard':
            AggOp = standard_aggregation(C_l, **kwargs)[0]
        elif fn == 'lloyd':
            AggOp = lloyd_aggregation(C_l, **kwargs)[0]
        elif fn == 'predefined':
            AggOp = kwargs['AggOp'].tocsr()
        else:
            raise ValueError('unrecognized aggregation method %s' % str(fn))

        T_l, x = fit_candidates(AggOp, x)  # step 4c

        fn, kwargs = unpack_arg(smooth[len(As)-1])  # step 4d
        if fn == 'jacobi':
            P_l = jacobi_prolongation_smoother(A_l, T_l, C_l, x, **kwargs)
        elif fn == 'richardson':
            P_l = richardson_prolongation_smoother(A_l, T_l, **kwargs)
        elif fn == 'energy':
            P_l = energy_prolongation_smoother(A_l, T_l, C_l, x, None,
                                               (False, {}), **kwargs)
        elif fn is None:
            P_l = T_l
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # R should reflect A's structure # step 4e
        if symmetry == 'symmetric':
            A_l = P_l.T.asformat(P_l.format) * A_l * P_l
        elif symmetry == 'hermitian':
            A_l = P_l.H.asformat(P_l.format) * A_l * P_l

        StrengthOps.append(C_l)
        AggOps.append(AggOp)
        Ps.append(P_l)
        As.append(A_l)

        # skip to step 5 as in step 4e
        if (A_l.shape[0] <= max_coarse) or (len(AggOps) + 1 >= max_levels):
            break

        if not skip_f_to_i:
            x_hat = x.copy()  # step 4g
            relax(A_l, x)  # step 4h
            work[:] += A_l.nnz*candidate_iters*2
            if pdef is True:
                x_A_x = np.dot(np.conjugate(x).T, A_l*x)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)
            else:
                # use A.H A inner-product
                Ax = A_l * x
                # Axhat = A_l * x_hat
                x_A_x = np.dot(np.conjugate(Ax).T, Ax)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)

            if err_ratio < epsilon:  # step 4i
                # print "sufficient convergence, skipping"
                skip_f_to_i = True
                if x_A_x == 0:
                    x = x_hat  # need to restore x
        else:
            # just carry out relaxation, don't check for convergence
            relax(A_l, x)  # step 4h
            work[:] += 2 * A_l.nnz * candidate_iters

        # store xs for diagnostic use and for use in step 5
        xs.append(x)

    # step 5
    # Extend coarse-level candidate to the finest level
    # --> note that we start with the x from the second coarsest level
    x = xs[-1]
    # make sure that xs[-1] has been relaxed by step 4h, i.e. relax(As[-2], x)
    for lev in range(len(Ps)-2, -1, -1):  # lev = coarsest ... finest-1
        P = Ps[lev]                     # I: lev --> lev+1
        A = As[lev]                     # A on lev+1
        x = P * x
        relax(A, x)
        work[:] += A.nnz*candidate_iters*2

    # Set predefined strength of connection and aggregation
    if len(AggOps) > 1:
        aggregate = [('predefined', {'AggOp': AggOps[i]})
                     for i in range(len(AggOps))]
        strength = [('predefined', {'C': StrengthOps[i]})
                    for i in range(len(StrengthOps))]

    return x, aggregate, strength  # first candidate
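
The ('predefined', ...) tuples returned above let a later setup pass reuse
the exact same coarsening. A minimal sketch of that round trip, assuming a
hierarchy built with keep=True so that AggOp is stored on each level; the
Poisson matrix is illustrative only:

import numpy as np
from pyamg.gallery import poisson
from pyamg.aggregation import smoothed_aggregation_solver

A = poisson((50, 50), format='csr')

# Build once, keeping the aggregation operators on each level
ml = smoothed_aggregation_solver(A, max_coarse=10, keep=True)
aggregate = [('predefined', {'AggOp': lvl.AggOp}) for lvl in ml.levels[:-1]]

# Rebuild with the coarsening frozen: setup skips aggregation and reuses
# AggOp level by level, just as the stage above arranges for later passes
ml2 = smoothed_aggregation_solver(A, B=np.ones((A.shape[0], 1)),
                                  aggregate=aggregate, max_coarse=10)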
Example No. 9
0
def coarse_grid_solver(solver):
    """Return a coarse grid solver suitable for multilevel_solver

    Parameters
    ----------
    solver : {string, callable, tuple}
        The solver method is either (1) a string such as 'splu' or 'pinv'
        naming one of the methods below, (2) a callable object that takes
        parameters (A, b) and returns an (approximate or exact) solution to
        the linear system Ax = b, or (3) a tuple of the form
        (string|callable, args), where args is a dictionary of arguments to
        be passed to the function denoted by the string or callable.

        The set of valid string arguments is:
            - Sparse direct methods:
                + splu : sparse LU solver
            - Sparse iterative methods:
                + the name of any method in scipy.sparse.linalg.isolve or
                  pyamg.krylov (e.g. 'cg').
                  Methods in pyamg.krylov take precedence.
                + relaxation method, such as 'gauss_seidel' or 'jacobi',
                  present in pyamg.relaxation
            - Dense methods:
                + pinv     : pseudoinverse (QR)
                + pinv2    : pseudoinverse (SVD)
                + lu       : LU factorization
                + cholesky : Cholesky factorization

    Returns
    -------
    ptr : generic_solver
        A solver instance for use standalone or as a coarse-grid solver.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import spdiags
    >>> from pyamg.gallery import poisson
    >>> from pyamg import coarse_grid_solver
    >>> A = poisson((10, 10), format='csr')
    >>> b = A * np.ones(A.shape[0])
    >>> cgs = coarse_grid_solver('lu')
    >>> x = cgs(A, b)
    """

    solver, kwargs = unpack_arg(solver, cost=False)

    if solver in ['pinv', 'pinv2']:
        def solve(self, A, b):
            if not hasattr(self, 'P'):
                self.P = getattr(sp.linalg, solver)(A.todense(), **kwargs)
            return np.dot(self.P, b)

    elif solver == 'lu':
        def solve(self, A, b):
            if not hasattr(self, 'LU'):
                self.LU = sp.linalg.lu_factor(A.todense(), **kwargs)
            return sp.linalg.lu_solve(self.LU, b)

    elif solver == 'cholesky':
        def solve(self, A, b):
            if not hasattr(self, 'L'):
                self.L = sp.linalg.cho_factor(A.todense(), **kwargs)
            return sp.linalg.cho_solve(self.L, b)

    elif solver == 'splu':
        def solve(self, A, b):
            if not hasattr(self, 'LU'):
                # for multiple candidates in B, A will often have a couple zero
                # rows/columns that must be removed
                Acsc = A.tocsc()
                Acsc.eliminate_zeros()
                diffptr = Acsc.indptr[:-1] - Acsc.indptr[1:]
                nonzero_cols = (diffptr != 0).nonzero()[0]
                Map = sp.sparse.eye(Acsc.shape[0], Acsc.shape[1], format='csc')
                Map = Map[:, nonzero_cols]
                Acsc = Map.T.tocsc() * Acsc * Map
                self.LU = sp.sparse.linalg.splu(Acsc, **kwargs)
                self.LU_Map = Map

            return self.LU_Map * self.LU.solve(np.ravel(self.LU_Map.T * b))

    elif solver in ['bicg', 'bicgstab', 'cg', 'cgs', 'gmres', 'qmr', 'minres']:
        from pyamg import krylov
        if hasattr(krylov, solver):
            fn = getattr(krylov, solver)
        else:
            fn = getattr(sp.sparse.linalg.isolve, solver)

        def solve(self, A, b):
            if 'tol' not in kwargs:
                eps = np.finfo(np.float64).eps
                feps = np.finfo(np.single).eps
                geps = np.finfo(np.longdouble).eps
                _array_precision = {'f': 0, 'd': 1, 'g': 2,
                                    'F': 0, 'D': 1, 'G': 2}
                kwargs['tol'] = {0: feps * 1e3, 1: eps * 1e6,
                                 2: geps * 1e6}[_array_precision[A.dtype.char]]

            return fn(A, b, **kwargs)[0]

    elif solver in ['gauss_seidel', 'jacobi', 'block_gauss_seidel', 'schwarz',
                    'block_jacobi', 'richardson', 'sor', 'chebyshev',
                    'jacobi_ne', 'gauss_seidel_ne', 'gauss_seidel_nr']:

        if 'iterations' not in kwargs:
            kwargs['iterations'] = 10

        def solve(self, A, b):
            from pyamg.relaxation import smoothing
            from pyamg import multilevel_solver

            lvl = multilevel_solver.level()
            lvl.A = A
            fn = getattr(smoothing, 'setup_' + str(solver))
            relax = fn(lvl, **kwargs)
            x = np.zeros_like(b)
            relax(A, x, b)

            return x

    elif solver is None:
        # No coarse grid solve
        def solve(self, A, b):
            return 0 * b  # should this return b instead?

    elif callable(solver):
        def solve(self, A, b):
            return solver(A, b, **kwargs)

    else:
        raise ValueError('unknown solver: %s' % solver)


    class generic_solver:
        def __call__(self, A, b):
            # make sure x is same dimensions and type as b
            b = np.asanyarray(b)

            if A.nnz == 0:
                # if A.nnz = 0, then we expect no correction
                x = np.zeros(b.shape)
            else:
                x = solve(self, A, b)

            if isinstance(b, np.ndarray):
                x = np.asarray(x)
            elif isinstance(b, np.matrix):
                x = np.asmatrix(x)
            else:
                raise ValueError('unrecognized type')

            return x.reshape(b.shape)

        def __repr__(self):
            return 'coarse_grid_solver(' + repr(solver) + ')'

        def name(self):
            return repr(solver)

    return generic_solver()
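
The docstring example exercises a direct method; the tuple form described
under option (3) reaches the relaxation branch above. A small sketch, with
the Poisson system purely illustrative:

import numpy as np
from pyamg.gallery import poisson
from pyamg import coarse_grid_solver

A = poisson((10, 10), format='csr')
b = A * np.ones(A.shape[0])

# (string, args): args are forwarded to the relaxation setup routine,
# here ten symmetric Gauss-Seidel sweeps as an approximate coarse solve
cgs = coarse_grid_solver(('gauss_seidel',
                          {'iterations': 10, 'sweep': 'symmetric'}))
x = cgs(A, b)
print(np.linalg.norm(b - A * x))  # residual norm after the coarse solve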
Example No. 10
0
def extend_hierarchy(levels, strength, CF, interpolation, restriction, keep):
    """ helper function for local methods """

    A = levels[-1].A

    # Compute the strength-of-connection matrix C, where larger
    # C[i,j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    levels[-1].complexity['strength'] = kwargs['cost'][0]

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = split.RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = split.PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = split.PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = split.CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = split.CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)

    levels[-1].complexity['CF'] = kwargs['cost'][0]

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    fn, kwargs = unpack_arg(interpolation)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'injection':
        P = injection_interpolation(A, splitting, **kwargs)
    else:
        raise ValueError('unknown interpolation method (%s)' % interpolation)
    levels[-1].complexity['interpolate'] = kwargs['cost'][0]

    # Generate the restriction matrix that maps from the fine-grid to the
    # coarse-grid. Must make sure transpose matrices remain in CSR or BSR
    fn, kwargs = unpack_arg(restriction)
    if isspmatrix_csr(A):
        if restriction == 'galerkin':
            R = P.T.tocsr()
        elif fn == 'standard':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'distance_two':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'direct':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'one_point':         # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'injection':         # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tocsr()
        else:
            raise ValueError('unknown restriction method (%s)' %
                             restriction)
    else:
        if restriction == 'galerkin':
            R = P.T.tobsr()
        elif fn == 'standard':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'distance_two':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'direct':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'one_point':         # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'injection':         # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            raise ValueError('unknown restriction method (%s)' %
                             restriction)

    levels[-1].complexity['restriction'] = kwargs['cost'][0]

    # Store relevant information for this level
    if keep:
        levels[-1].C = C                  # strength of connection matrix

    levels[-1].P = P                  # prolongation operator
    levels[-1].R = R                  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R, A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA, P) / float(A.nnz)
    A = RA * P  # Galerkin operator, Ac = RAP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    # Form next level through Galerkin product
    levels.append(multilevel_solver.level())
    levels[-1].A = A
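
For context, a sketch of the setup loop that typically drives this helper.
The stopping parameters and method choices here are illustrative
assumptions, not part of the function itself:

# Hypothetical driver: coarsen until the operator is small enough
levels = [multilevel_solver.level()]
levels[0].A = A  # fine-grid operator in CSR or BSR form

max_levels, max_coarse = 10, 50
while (levels[-1].A.shape[0] > max_coarse) and (len(levels) < max_levels):
    levels[-1].complexity = {}  # extend_hierarchy fills these entries
    extend_hierarchy(levels, strength='classical', CF='RS',
                     interpolation='standard', restriction='galerkin',
                     keep=True)

ml = multilevel_solver(levels)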
Example No. 11
0
def change_smoothers(ml, presmoother, postsmoother):
    '''
    Initialize pre- and post-smoothers throughout a multilevel_solver, with
    the option of having different smoothers at different levels.

    For each level of the multilevel_solver 'ml' (except the coarsest level),
    initialize the .presmoother() and .postsmoother() methods used in the
    multigrid cycle.

    Parameters
    ----------
    ml : {pyamg multilevel hierarchy}
        Data structure that stores the multigrid hierarchy.
    presmoother : {None, string, tuple, list}
        presmoother can be (1) the name of a supported smoother, e.g.
        "gauss_seidel", (2) a tuple of the form ('method','opts') where
        'method' is the name of a supported smoother and 'opts' a dict of
        keyword arguments to the smoother, or (3) a list of instances of
        options 1 or 2.  See the Examples section for illustrations of the
        format.

        If presmoother is a list, presmoother[i] determines the smoothing
        strategy for level i.  Else, presmoother defines the same strategy
        for all levels.

        If len(presmoother) < len(ml.levels), then
        presmoother[-1] is used for all remaining levels

        If len(presmoother) > len(ml.levels), then
        the remaining smoothing strategies are ignored

    postsmoother : {string, tuple, list}
        Defines postsmoother in identical fashion to presmoother

    Returns
    -------
    ml is changed in place
    ml.levels[i].smoothers['presmoother']   <===  presmoother[i]
    ml.levels[i].smoothers['postsmoother']  <===  postsmoother[i]
    ml.symmetric_smoothing is marked True/False depending on whether
        the smoothing scheme is symmetric.

    Notes
    -----
    - Parameter 'omega' of the Jacobi, Richardson, and jacobi_ne
      methods is scaled by the spectral radius of the matrix on
      each level.  Therefore 'omega' should be in the interval (0,2).
    - Parameter 'withrho' (default: True) controls whether omega is
      rescaled by the spectral radius in jacobi, block_jacobi, and jacobi_ne.
    - Initializing the smoothers after the hierarchy has been set up allows
      for "algebraically" directed relaxation, such as strength_based_schwarz,
      which uses only the strong connections of a degree-of-freedom to define
      overlapping regions.
    - Available smoother methods::

        gauss_seidel
        block_gauss_seidel
        jacobi
        block_jacobi
        richardson
        sor
        chebyshev
        gauss_seidel_nr
        gauss_seidel_ne
        jacobi_ne
        cg
        gmres
        cgne
        cgnr
        schwarz
        strength_based_schwarz
        None

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation import smoothed_aggregation_solver
    >>> from pyamg.relaxation.smoothing import change_smoothers
    >>> from pyamg.util.linalg import norm
    >>> from scipy import rand, array, mean
    >>> A = poisson((10,10), format='csr')
    >>> b = rand(A.shape[0],)
    >>> ml = smoothed_aggregation_solver(A, max_coarse=10)
    >>> #
    >>> # Set all levels to use gauss_seidel's defaults
    >>> smoothers = 'gauss_seidel'
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=smoothers)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    >>> #
    >>> # Set all levels to use three iterations of gauss_seidel's defaults
    >>> smoothers = ('gauss_seidel', {'iterations' : 3})
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=None)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    >>> #
    >>> # Set level 0 to use gauss_seidel's defaults, and all
    >>> # subsequent levels to use 5 iterations of cgnr
    >>> smoothers = ['gauss_seidel', ('cgnr', {'maxiter' : 5})]
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=smoothers)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    '''

    ml.symmetric_smoothing = True

    # interpret arguments into list
    if isinstance(presmoother, (str, tuple)) or (presmoother is None):
        presmoother = [presmoother]
    elif not isinstance(presmoother, list):
        raise ValueError('Unrecognized presmoother')

    if isinstance(postsmoother, (str, tuple)) or (postsmoother is None):
        postsmoother = [postsmoother]
    elif not isinstance(postsmoother, list):
        raise ValueError('Unrecognized postsmoother')

    # set ml.levels[i].presmoother = presmoother[i],
    #     ml.levels[i].postsmoother = postsmoother[i]
    fn1 = None      # Predefine to keep scope beyond first loop
    fn2 = None
    kwargs1 = {}
    kwargs2 = {}
    min_len = min(len(presmoother), len(postsmoother), len(ml.levels[:-1]))
    same = (len(presmoother) == len(postsmoother))
    for i in range(0, min_len):
        # unpack presmoother[i]
        fn1, kwargs1 = unpack_arg(presmoother[i], cost=False)
        # get function handle
        try:
            setup_presmoother = eval('setup_' + str(fn1))
        except NameError:
            raise NameError("invalid presmoother method: ", fn1)
        ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)

        # unpack postsmoother[i]
        fn2, kwargs2 = unpack_arg(postsmoother[i], cost=False)
        # get function handle
        try:
            setup_postsmoother = eval('setup_' + str(fn2))
        except NameError:
            raise NameError("invalid postsmoother method: ", fn2)
        ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)

        # Save tuples in ml to check cycle complexity
        ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
        ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]

        # Check if symmetric smoothing scheme
        it1 = kwargs1.get('iterations', DEFAULT_NITER)
        it2 = kwargs2.get('iterations', DEFAULT_NITER)
        if (it1 != it2):
            ml.symmetric_smoothing = False
        elif (fn1 != fn2):
            if ((fn1 == 'CF_jacobi' and fn2 == 'FC_jacobi') or
                (fn1 == 'CF_block_jacobi' and fn2 == 'FC_block_jacobi')):
                fit1 = kwargs1.get('F_iterations', DEFAULT_NITER)
                fit2 = kwargs2.get('F_iterations', DEFAULT_NITER)
                cit1 = kwargs1.get('C_iterations', DEFAULT_NITER)
                cit2 = kwargs2.get('C_iterations', DEFAULT_NITER)
                if (fit1 != fit2) or (cit1 != cit2):
                    ml.symmetric_smoothing = False
            else:
                ml.symmetric_smoothing = False
        elif fn1 in KRYLOV_RELAXATION or fn2 in KRYLOV_RELAXATION:
            ml.symmetric_smoothing = False
        elif fn1 not in SYMMETRIC_RELAXATION:
            if ('CF' in fn1) or ('FC' in fn1):
                ml.symmetric_smoothing = False
            else:
                sweep1 = kwargs1.get('sweep', DEFAULT_SWEEP)
                sweep2 = kwargs2.get('sweep', DEFAULT_SWEEP)
                if not ((sweep1 == 'forward' and sweep2 == 'backward') or
                        (sweep1 == 'backward' and sweep2 == 'forward') or
                        (sweep1 == 'symmetric' and sweep2 == 'symmetric')):
                    ml.symmetric_smoothing = False

    if len(presmoother) < len(postsmoother):
        mid_len = min(len(postsmoother), len(ml.levels[:-1]))
        for i in range(min_len, mid_len):
            # Set up presmoother
            ml.levels[i].presmoother = setup_presmoother(
                ml.levels[i], **kwargs1)

            # unpack postsmoother[i]
            fn2, kwargs2 = unpack_arg(postsmoother[i], cost=False)
            # get function handle
            try:
                setup_postsmoother = eval('setup_' + str(fn2))
            except NameError:
                raise NameError("invalid postsmoother method: ", fn2)
            ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)

            # Save tuples in ml to check cycle complexity
            ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
            ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]

            # Check if symmetric smoothing scheme
            it1 = kwargs1.get('iterations', DEFAULT_NITER)
            it2 = kwargs2.get('iterations', DEFAULT_NITER)
            if (it1 != it2):
                ml.symmetric_smoothing = False
            elif (fn1 != fn2):
                if ((fn1 == 'CF_jacobi' and fn2 == 'FC_jacobi') or
                    (fn1 == 'CF_block_jacobi' and fn2 == 'FC_block_jacobi')):
                    fit1 = kwargs1.get('F_iterations', DEFAULT_NITER)
                    fit2 = kwargs2.get('F_iterations', DEFAULT_NITER)
                    cit1 = kwargs1.get('C_iterations', DEFAULT_NITER)
                    cit2 = kwargs2.get('C_iterations', DEFAULT_NITER)
                    if (fit1 != fit2) or (cit1 != cit2):
                        ml.symmetric_smoothing = False
                else:
                    ml.symmetric_smoothing = False
            elif fn1 in KRYLOV_RELAXATION or fn2 in KRYLOV_RELAXATION:
                ml.symmetric_smoothing = False
            elif fn1 not in SYMMETRIC_RELAXATION:
                if ('CF' in fn1) or ('FC' in fn1):
                    ml.symmetric_smoothing = False
                else:
                    sweep1 = kwargs1.get('sweep', DEFAULT_SWEEP)
                    sweep2 = kwargs2.get('sweep', DEFAULT_SWEEP)
                    if not ((sweep1 == 'forward' and sweep2 == 'backward') or
                            (sweep1 == 'backward' and sweep2 == 'forward') or
                            (sweep1 == 'symmetric' and sweep2 == 'symmetric')):
                        ml.symmetric_smoothing = False

    elif len(presmoother) > len(postsmoother):
        mid_len = min(len(presmoother), len(ml.levels[:-1]))
        for i in range(min_len, mid_len):
            # unpack presmoother[i]
            fn1, kwargs1 = unpack_arg(presmoother[i], cost=False)
            # get function handle
            try:
                setup_presmoother = eval('setup_' + str(fn1))
            except NameError:
                raise NameError("invalid presmoother method: ", fn1)
            ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)

            # Set up postsmoother
            ml.levels[i].postsmoother = setup_postsmoother(
                ml.levels[i], **kwargs2)

            # Save tuples in ml to check cycle complexity
            ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
            ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]

            # Check if symmetric smoothing scheme
            it1 = kwargs1.get('iterations', DEFAULT_NITER)
            it2 = kwargs2.get('iterations', DEFAULT_NITER)
            if (it1 != it2):
                ml.symmetric_smoothing = False
            elif (fn1 != fn2):
                if ((fn1 == 'CF_jacobi' and fn2 == 'FC_jacobi') or
                    (fn1 == 'CF_block_jacobi' and fn2 == 'FC_block_jacobi')):
                    fit1 = kwargs1.get('F_iterations', DEFAULT_NITER)
                    fit2 = kwargs2.get('F_iterations', DEFAULT_NITER)
                    cit1 = kwargs1.get('C_iterations', DEFAULT_NITER)
                    cit2 = kwargs2.get('C_iterations', DEFAULT_NITER)
                    if (fit1 != fit2) or (cit1 != cit2):
                        ml.symmetric_smoothing = False
                else:
                    ml.symmetric_smoothing = False
            elif fn1 in KRYLOV_RELAXATION or fn2 in KRYLOV_RELAXATION:
                ml.symmetric_smoothing = False
            elif fn1 not in SYMMETRIC_RELAXATION:
                if ('CF' in fn1) or ('FC' in fn1):
                    ml.symmetric_smoothing = False
                else:
                    sweep1 = kwargs1.get('sweep', DEFAULT_SWEEP)
                    sweep2 = kwargs2.get('sweep', DEFAULT_SWEEP)
                    if not ((sweep1 == 'forward' and sweep2 == 'backward') or
                            (sweep1 == 'backward' and sweep2 == 'forward') or
                            (sweep1 == 'symmetric' and sweep2 == 'symmetric')):
                        ml.symmetric_smoothing = False

    else:
        mid_len = min_len

    # Fill in remaining levels
    for i in range(mid_len, len(ml.levels[:-1])):
        ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)
        ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)
        ml.levels[i].smoothers['presmoother'] = [fn1, kwargs1]
        ml.levels[i].smoothers['postsmoother'] = [fn2, kwargs2]
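
A quick check of the symmetry bookkeeping above, as a sketch: a forward
pre-sweep paired with a backward post-sweep keeps the cycle symmetric,
while mismatched iteration counts do not:

from pyamg.gallery import poisson
from pyamg.aggregation import smoothed_aggregation_solver
from pyamg.relaxation.smoothing import change_smoothers

A = poisson((10, 10), format='csr')
ml = smoothed_aggregation_solver(A, max_coarse=10)

# forward/backward Gauss-Seidel pair: symmetric as a whole cycle
change_smoothers(ml,
                 presmoother=('gauss_seidel', {'sweep': 'forward'}),
                 postsmoother=('gauss_seidel', {'sweep': 'backward'}))
print(ml.symmetric_smoothing)  # expected: True

# different iteration counts on the two sides break the symmetry
change_smoothers(ml,
                 presmoother=('gauss_seidel', {'iterations': 1}),
                 postsmoother=('gauss_seidel', {'iterations': 2}))
print(ml.symmetric_smoothing)  # expected: False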
Example No. 12
0
def extend_hierarchy(levels, strength, CF, interpolation, restriction, keep):
    """ helper function for local methods """

    A = levels[-1].A

    # Compute the strength-of-connection matrix C, where larger
    # C[i,j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    levels[-1].complexity['strength'] = kwargs['cost'][0]

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = split.RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = split.PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = split.PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = split.CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = split.CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)

    levels[-1].complexity['CF'] = kwargs['cost'][0]

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    fn, kwargs = unpack_arg(interpolation)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'injection':
        P = injection_interpolation(A, splitting, **kwargs)
    else:
        raise ValueError('unknown interpolation method (%s)' % interpolation)
    levels[-1].complexity['interpolate'] = kwargs['cost'][0]

    # Generate the restriction matrix that maps from the fine-grid to the
    # coarse-grid. Must make sure transpose matrices remain in CSR or BSR
    fn, kwargs = unpack_arg(restriction)
    if isspmatrix_csr(A):
        if restriction == 'galerkin':
            R = P.T.tocsr()
        elif fn == 'standard':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'distance_two':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'direct':
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'one_point':  # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
        elif fn == 'injection':  # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tocsr()
        else:
            raise ValueError('unknown restriction method (%s)' %
                             restriction)
    else:
        if restriction == 'galerkin':
            R = P.T.tobsr()
        elif fn == 'standard':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'distance_two':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'direct':
            temp_A = A.T.tobsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'one_point':  # Don't need A^T here
            temp_C = C.T.tocsr()
            R = one_point_interpolation(A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        elif fn == 'injection':  # Don't need A^T or C^T here
            R = injection_interpolation(A, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            raise ValueError('unknown restriction method (%s)' %
                             restriction)

    levels[-1].complexity['restriction'] = kwargs['cost'][0]

    # Store relevant information for this level
    if keep:
        levels[-1].C = C  # strength of connection matrix

    levels[-1].P = P  # prolongation operator
    levels[-1].R = R  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R, A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA, P) / float(A.nnz)
    A = RA * P  # Galerkin operator, Ac = RAP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    # Form next level through Galerkin product
    levels.append(multilevel_solver.level())
    levels[-1].A = A
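
The per-level entries recorded above are normalized by each level's A.nnz,
so, given a solver object ml whose levels carry these dicts, they can be
folded into one setup-cost estimate. A rough sketch; setup_cost is a
hypothetical helper, not part of pyamg, and the exact accounting depends
on the cost framework's conventions:

def setup_cost(ml):
    # Weight each level's normalized costs back to absolute work, then
    # express the total in units of fine-grid nonzeros
    total = 0.0
    for lvl in ml.levels[:-1]:  # the coarsest level records no entries
        total += sum(lvl.complexity.values()) * lvl.A.nnz
    return total / ml.levels[0].A.nnz

print('approximate setup cost: %.1f work units' % setup_cost(ml))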
Example No. 13
0
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother,
                        smooth, eliminate_local, coarse_solver, work):
    """
    Computes additional candidates and improvements
    following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """

    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.  the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K+1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * int(M / K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = int(B_old.shape[0] / nodesize)
        Bnew = np.zeros((nnodes, nodesize+1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype.name.startswith('complex'):
        x = x + 1.0j*np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    x = ml.solve(b, x0=x, tol=float(np.finfo(np.float64).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity()*ml.levels[0].A.nnz*candidate_iters*2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, it's not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #    eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #    levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(levels[i].A, T,
                                                           **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i+1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i+1].T)
        R_bridge = levels[i+2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i+1])
        if fn == 'jacobi':
            levels[i+1].P = jacobi_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i+1].P = richardson_prolongation_smoother(levels[i+1].A,
                                                             T_bridge,
                                                             **kwargs)
        elif fn == 'energy':
            levels[i+1].P = energy_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, None,
                                                         (False, {}), **kwargs)
        elif fn is None:
            levels[i+1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i+1].R = levels[i+1].P.T.asformat(levels[i+1].P.format)
        elif symmetry == 'hermitian':
            levels[i+1].R = levels[i+1].P.H.asformat(levels[i+1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i+1:], coarse_solver=coarse_solver)
        change_smoothers(solver, presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x), x0=x,
                         tol=float(np.finfo(np.float64).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
            candidate_iters*2

        # update values on next level
        levels[i+1].B = R[:, :-1].copy()
        levels[i+1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother, cost=False)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz*candidate_iters*2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A, x, np.zeros_like(x), indices,
                                 iterations=candidate_iters, sweep='symmetric')

        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')

        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')

        elif fn == 'jacobi':
            jacobi(lvl.A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))

        elif fn == 'richardson':
            polynomial(lvl.A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(lvl.A)])

        elif fn == 'gmres':
            x[:] = (gmres(lvl.A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x/norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
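
How the two stages fit together, following Algorithms 3 and 4 of the
reference: the initial stage produces the first candidate and freezes the
coarsening, after which this general stage is called repeatedly to enrich
B. A sketch with simplified argument plumbing; levels_guess, the option
lists, and the rebuild step are illustrative assumptions:

import numpy as np

work = np.zeros((1,))  # work units accumulated in place, as above
levels_guess = 10
strength = [('symmetric', {})] * levels_guess
aggregate = [('standard', {})] * levels_guess
smooth = [('jacobi', {})] * levels_guess

# Stage 1 (Algorithm 3): first candidate plus a frozen coarsening
x, aggregate, strength = initial_setup_stage(
    A, 'hermitian', True, 5, 0.1, levels_guess, 100, aggregate,
    ('gauss_seidel', {}), smooth, strength, work)

# ... build ml from (A, B=x) using the now-predefined aggregate/strength,
# keeping T, B, AggOp, and C on each level ...

# Stage 2 (Algorithm 4): each pass returns one additional candidate
for _ in range(2):
    x_new = general_setup_stage(ml, 'hermitian', 5, ('gauss_seidel', {}),
                                smooth, (False, {}), 'pinv2', work)
    # hstack x_new onto B and rebuild ml before the next pass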
Example No. 14
0
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother, smooth,
                        eliminate_local, coarse_solver, work):
    """
    Computes additional candidates and improvements
    following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.  the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K + 1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * int(M / K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = int(B_old.shape[0] / nodesize)
        Bnew = np.zeros((nnodes, nodesize + 1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype.name.startswith('complex'):
        x = x + 1.0j * np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    x = ml.solve(b,
                 x0=x,
                 tol=float(np.finfo(np.float64).tiny),
                 maxiter=candidate_iters)
    work[:] += (ml.operator_complexity() *
                ml.levels[0].A.nnz * candidate_iters * 2)

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, it's not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #    eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #    levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(
                levels[i].A, T, **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i + 1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i + 1].T)
        R_bridge = levels[i + 2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i + 1])
        if fn == 'jacobi':
            levels[i + 1].P = jacobi_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i + 1].P = richardson_prolongation_smoother(
                levels[i + 1].A, T_bridge, **kwargs)
        elif fn == 'energy':
            levels[i + 1].P = energy_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge, None,
                (False, {}), **kwargs)
        elif fn is None:
            levels[i + 1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i + 1].R = levels[i + 1].P.T.asformat(
                levels[i + 1].P.format)
        elif symmetry == 'hermitian':
            levels[i + 1].R = levels[i + 1].P.H.asformat(
                levels[i + 1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i + 1:], coarse_solver=coarse_solver)
        change_smoothers(solver,
                         presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x),
                         x0=x,
                         tol=float(np.finfo(np.float64).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
            candidate_iters*2

        # update values on next level
        levels[i + 1].B = R[:, :-1].copy()
        levels[i + 1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother, cost=False)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz * candidate_iters * 2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A,
                                 x,
                                 np.zeros_like(x),
                                 indices,
                                 iterations=candidate_iters,
                                 sweep='symmetric')

        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')

        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')

        elif fn == 'jacobi':
            jacobi(lvl.A,
                   x,
                   np.zeros_like(x),
                   iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))

        elif fn == 'richardson':
            polynomial(lvl.A,
                       x,
                       np.zeros_like(x),
                       iterations=1,
                       coefficients=[1.0 / approximate_spectral_radius(lvl.A)])

        elif fn == 'gmres':
            x[:] = (gmres(lvl.A,
                          np.zeros_like(x),
                          x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x / norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
Example No. 15
0
def extend_hierarchy(levels, strength, CF, interp, restrict, filter_operator,
                     coarse_grid_P, coarse_grid_R, keep):
    """ helper function for local methods """

    # Filter operator.  Need to keep the original matrix on the finest level
    # for computing residuals.
    if (filter_operator is not None) and (filter_operator[1] != 0):
        if len(levels) == 1:
            A = deepcopy(levels[-1].A)
        else:
            A = levels[-1].A
        filter_matrix_rows(A,
                           filter_operator[1],
                           diagonal=True,
                           lump=filter_operator[0])
    else:
        A = levels[-1].A

    # Check if matrix was filtered to be diagonal --> coarsest grid
    if A.nnz == A.shape[0]:
        return 1

    # Zero initial complexities for strength, splitting and interpolation
    levels[-1].complexity['CF'] = 0.0
    levels[-1].complexity['strength'] = 0.0
    levels[-1].complexity['interpolate'] = 0.0

    # Compute the strength-of-connection matrix C, where larger
    # C[i,j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength)
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        C = evolution_strength_of_connection(A, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))
    levels[-1].complexity['strength'] += kwargs['cost'][0] * A.nnz / float(
        A.nnz)

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    if fn == 'RS':
        splitting = RS(C, **kwargs)
    elif fn == 'PMIS':
        splitting = PMIS(C, **kwargs)
    elif fn == 'PMISc':
        splitting = PMISc(C, **kwargs)
    elif fn == 'CLJP':
        splitting = CLJP(C, **kwargs)
    elif fn == 'CLJPc':
        splitting = CLJPc(C, **kwargs)
    elif fn == 'CR':
        splitting = CR(C, **kwargs)
    elif fn == 'weighted_matching':
        splitting, soc = weighted_matching(C, **kwargs)
        if soc is not None:
            C = soc
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)
    levels[-1].complexity['CF'] += kwargs['cost'][0] * C.nnz / float(A.nnz)
    temp = np.sum(splitting)
    if (temp == len(splitting)) or (temp == 0):
        return 1

    # Generate the interpolation matrix that maps from the coarse-grid to the
    # fine-grid
    r_flag = False
    fn, kwargs = unpack_arg(interp)
    if fn == 'standard':
        P = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'scaledAfc':
        P = scaled_Afc_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A):
            temp_A = bsr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = bsr_matrix(P.T)
        else:
            temp_A = csr_matrix(A.T)
            P = local_AIR(temp_A, splitting, **kwargs)
            P = csr_matrix(P.T)
    elif fn == 'restrict':
        r_flag = True
    else:
        raise ValueError('unknown interpolation method (%s)' % interp)
    levels[-1].complexity['interpolate'] += kwargs['cost'][0] * A.nnz / float(
        A.nnz)

    # Build restriction operator
    fn, kwargs = unpack_arg(restrict)
    if fn is None:
        R = P.T
    elif fn == 'air':
        R = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':  # Don't need A^T here
        temp_C = C.T.tocsr()
        R = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'inject':  # Don't need A^T or C^T here
        R = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R = R.T.tobsr()
        else:
            R = R.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = standard_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = distance_two_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R = R.T.tocsr()
    else:
        raise ValueError('unknown restriction method (%s)' % restrict)

    # If interp was 'restrict', set P = R^T
    if r_flag:
        P = R.T

    # Optional different interpolation for RAP
    fn, kwargs = unpack_arg(coarse_grid_P)
    if fn == 'standard':
        P_temp = standard_interpolation(A, C, splitting, **kwargs)
    elif fn == 'distance_two':
        P_temp = distance_two_interpolation(A, C, splitting, **kwargs)
    elif fn == 'direct':
        P_temp = direct_interpolation(A, C, splitting, **kwargs)
    elif fn == 'one_point':
        P_temp = one_point_interpolation(A, C, splitting, **kwargs)
    elif fn == 'inject':
        P_temp = injection_interpolation(A, splitting, **kwargs)
    elif fn == 'neumann':
        P_temp = neumann_ideal_interpolation(A, splitting, **kwargs)
    elif fn == 'air':
        if isspmatrix_bsr(A):
            temp_A = bsr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = bsr_matrix(P_temp.T)
        else:
            temp_A = csr_matrix(A.T)
            P_temp = local_AIR(temp_A, splitting, **kwargs)
            P_temp = csr_matrix(P_temp.T)
    else:
        P_temp = P

    # Optional different restriction for RAP
    fn, kwargs = unpack_arg(coarse_grid_R)
    if fn == 'air':
        R_temp = local_AIR(A, splitting, **kwargs)
    elif fn == 'neumann':
        R_temp = neumann_AIR(A, splitting, **kwargs)
    elif fn == 'one_point':  # Don't need A^T here
        temp_C = C.T.tocsr()
        R_temp = one_point_interpolation(A, temp_C, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'inject':  # Don't need A^T or C^T here
        R_temp = injection_interpolation(A, splitting, **kwargs)
        if isspmatrix_bsr(A):
            R_temp = R_temp.T.tobsr()
        else:
            R_temp = R_temp.T.tocsr()
    elif fn == 'standard':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting,
                                            **kwargs)
            R_temp = R_temp.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = standard_interpolation(temp_A, temp_C, splitting,
                                            **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'distance_two':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting,
                                                **kwargs)
            R_temp = R_temp.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = distance_two_interpolation(temp_A, temp_C, splitting,
                                                **kwargs)
            R_temp = R_temp.T.tocsr()
    elif fn == 'direct':
        if isspmatrix_bsr(A):
            temp_A = A.T.tobsr()
            temp_C = C.T.tobsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tobsr()
        else:
            temp_A = A.T.tocsr()
            temp_C = C.T.tocsr()
            R_temp = direct_interpolation(temp_A, temp_C, splitting, **kwargs)
            R_temp = R_temp.T.tocsr()
    else:
        R_temp = R

    # Store relevant information for this level
    if keep:
        levels[-1].C = C  # strength of connection matrix

    levels[-1].P = P  # prolongation operator
    levels[-1].R = R  # restriction operator
    levels[-1].splitting = splitting  # C/F splitting

    # Form coarse grid operator, get complexity
    # RAP is formed as R*(A*P)
    levels[-1].complexity['RAP'] = mat_mat_complexity(A, P_temp) / float(A.nnz)
    AP = A * P_temp
    levels[-1].complexity['RAP'] += mat_mat_complexity(R_temp, AP) / float(
        A.nnz)
    A = R_temp * AP

    # Make sure coarse-grid operator is in correct sparse format
    if (isspmatrix_csr(P) and (not isspmatrix_csr(A))):
        A = A.tocsr()
    elif (isspmatrix_bsr(P) and (not isspmatrix_bsr(A))):
        A = A.tobsr()

    A.eliminate_zeros()
    levels.append(multilevel_solver.level())
    levels[-1].A = A
    return 0
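
This variant returns 1 when no further coarsening is possible and 0 otherwise, appending the new coarse level as a side effect. The following is a hypothetical driver loop under those conventions; the test matrix, option choices, and the per-level complexity dict are assumptions based on the code above, not the library's actual setup routine.

import numpy as np
from pyamg.gallery import poisson
from pyamg.multilevel import multilevel_solver

A = poisson((100, 100), format='csr')    # illustrative test matrix
max_levels, max_coarse = 10, 20

levels = [multilevel_solver.level()]
levels[0].A = A
levels[0].complexity = {}                # this code tracks per-level costs

while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
    done = extend_hierarchy(levels, strength='classical', CF='RS',
                            interp='standard', restrict=None,
                            filter_operator=None, coarse_grid_P=None,
                            coarse_grid_R=None, keep=True)
    if done:   # coarsest grid reached (diagonal A or one-sided splitting)
        break
    levels[-1].complexity = {}           # prepare the freshly appended level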
Example no. 16
0
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
                     diagonal_dominance=False, keep=True):
    """Service routine to implement the strength of connection, aggregation,
    tentative prolongation construction, and prolongation smoothing.  Called by
    smoothed_aggregation_solver.
    """

    A = levels[-1].A
    B = levels[-1].B
    if A.symmetry == "nonsymmetric":
        AH = A.H.asformat(A.format)
        BH = levels[-1].BH

    # Compute the strength-of-connection matrix C, where larger
    # C[i,j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength[len(levels)-1])
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        if 'B' in kwargs:
            C = evolution_strength_of_connection(A, **kwargs)
        else:
            C = evolution_strength_of_connection(A, B, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'predefined':
        C = kwargs['C'].tocsr()
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn == 'affinity':
        C = affinity_distance(A, **kwargs)
    elif fn is None:
        C = A.tocsr()
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    levels[-1].complexity['strength'] = kwargs['cost'][0]
 
    # Avoid coarsening diagonally dominant rows
    flag, kwargs = unpack_arg(diagonal_dominance)
    if flag:
        C = eliminate_diag_dom_nodes(A, C, **kwargs)
        levels[-1].complexity['diag_dom'] = kwargs['cost'][0]

    # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
    # AggOp is a boolean matrix, where the sparsity pattern of the k-th column
    # denotes the fine-grid nodes agglomerated into the k-th coarse-grid node.
    fn, kwargs = unpack_arg(aggregate[len(levels)-1])
    if fn == 'standard':
        AggOp = standard_aggregation(C, **kwargs)[0]
    elif fn == 'naive':
        AggOp = naive_aggregation(C, **kwargs)[0]
    elif fn == 'lloyd':
        AggOp = lloyd_aggregation(C, **kwargs)[0]
    elif fn == 'predefined':
        AggOp = kwargs['AggOp'].tocsr()
    else:
        raise ValueError('unrecognized aggregation method %s' % str(fn))

    levels[-1].complexity['aggregation'] = kwargs['cost'][0] * (float(C.nnz)/A.nnz)

    # Improve near nullspace candidates by relaxing on A B = 0
    temp_cost = [0.0]
    fn, kwargs = unpack_arg(improve_candidates[len(levels)-1], cost=False)
    if fn is not None:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        B = relaxation_as_linear_operator((fn, kwargs), A, b, temp_cost) * B
        levels[-1].B = B
        if A.symmetry == "nonsymmetric":
            BH = relaxation_as_linear_operator((fn, kwargs), AH, b, temp_cost) * BH
            levels[-1].BH = BH

    levels[-1].complexity['candidates'] = temp_cost[0] * B.shape[1]

    # Compute the tentative prolongator, T, which is a tentative interpolation
    # matrix from the coarse-grid to the fine-grid.  T exactly interpolates
    # B_fine = T B_coarse. Orthogonalization complexity ~ 2nk^2, k=B.shape[1].
    temp_cost = [0.0]
    T, B = fit_candidates(AggOp, B, cost=temp_cost)
    if A.symmetry == "nonsymmetric":
        TH, BH = fit_candidates(AggOp, BH, cost=temp_cost)

    levels[-1].complexity['tentative'] = temp_cost[0]/A.nnz

    # Smooth the tentative prolongator, so that its accuracy is greatly
    # improved for algebraically smooth error.
    fn, kwargs = unpack_arg(smooth[len(levels)-1])
    if fn == 'jacobi':
        P = jacobi_prolongation_smoother(A, T, C, B, **kwargs)
    elif fn == 'richardson':
        P = richardson_prolongation_smoother(A, T, **kwargs)
    elif fn == 'energy':
        P = energy_prolongation_smoother(A, T, C, B, None, (False, {}),
                                         **kwargs)
    elif fn is None:
        P = T
    else:
        raise ValueError('unrecognized prolongation smoother method %s' %
                         str(fn))

    levels[-1].complexity['smooth_P'] = kwargs['cost'][0]

    # Compute the restriction matrix, R, which interpolates from the fine-grid
    # to the coarse-grid.  If A is nonsymmetric, then R must be constructed
    # based on A.H.  Otherwise R = P.H or P.T.
    symmetry = A.symmetry
    if symmetry == 'hermitian':
        R = P.H
    elif symmetry == 'symmetric':
        R = P.T
    elif symmetry == 'nonsymmetric':
        fn, kwargs = unpack_arg(smooth[len(levels)-1])
        if fn == 'jacobi':
            R = jacobi_prolongation_smoother(AH, TH, C, BH, **kwargs).H
        elif fn == 'richardson':
            R = richardson_prolongation_smoother(AH, TH, **kwargs).H
        elif fn == 'energy':
            R = energy_prolongation_smoother(AH, TH, C, BH, None, (False, {}),
                                             **kwargs)
            R = R.H
        elif fn is None:
            R = T.H
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))
        levels[-1].complexity['smooth_R'] = kwargs['cost'][0]

    if keep:
        levels[-1].C = C            # strength of connection matrix
        levels[-1].AggOp = AggOp    # aggregation operator
        levels[-1].T = T            # tentative prolongator

    levels[-1].P = P  # smoothed prolongator
    levels[-1].R = R  # restriction operator

    # Form coarse grid operator, get complexity
    levels[-1].complexity['RAP'] = mat_mat_complexity(R, A) / float(A.nnz)
    RA = R * A
    levels[-1].complexity['RAP'] += mat_mat_complexity(RA, P) / float(A.nnz)
    A = RA * P      # Galerkin operator, Ac = RAP
    A.symmetry = symmetry

    levels.append(multilevel_solver.level())
    levels[-1].A = A
    levels[-1].B = B           # right near nullspace candidates

    if A.symmetry == "nonsymmetric":
        levels[-1].BH = BH     # left near nullspace candidates
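
The tentative-prolongator comment above states that T exactly interpolates the candidates, B_fine = T B_coarse. A small self-contained check of that property with a toy two-aggregate operator (the aggregates and candidate below are illustrative only):

import numpy as np
from scipy.sparse import csr_matrix
from pyamg.aggregation import fit_candidates

# Two aggregates: nodes {0, 1} -> coarse node 0, nodes {2, 3} -> coarse node 1
AggOp = csr_matrix(np.array([[1, 0],
                             [1, 0],
                             [0, 1],
                             [0, 1]], dtype=float))
B_fine = np.ones((4, 1))  # constant near-nullspace candidate

T, B_coarse = fit_candidates(AggOp, B_fine)
assert np.allclose(T @ B_coarse, B_fine)  # T reproduces B exactly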
Example no. 17
0
def adaptive_sa_solver(A, B=None, symmetry='hermitian',
                       pdef=True, num_candidates=1, candidate_iters=5,
                       improvement_iters=0, epsilon=0.1,
                       max_levels=10, max_coarse=10, aggregate='standard',
                       prepostsmoother=('gauss_seidel',
                                        {'sweep': 'symmetric'}),
                       smooth=('jacobi', {}), strength='symmetric',
                       coarse_solver='pinv2',
                       eliminate_local=(False, {'Ca': 1.0}), keep=False,
                       **kwargs):
    """
    Create a multilevel solver using Adaptive Smoothed Aggregation (aSA)

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Square matrix in CSR or BSR format
    B : {None, n x m dense matrix}
        If a matrix, then this forms the basis for the first m candidates.
        Also in this case, the initial setup stage is skipped, because this
        provides the first candidate(s).  If None, then a random initial guess
        and relaxation are used to inform the initial candidate.
    symmetry : {string}
        'symmetric' refers to both real and complex symmetric.
        'hermitian' refers to both complex Hermitian and real Hermitian.
        Note that for the strictly real case, these two options are the same.
        Note that this flag does not denote definiteness of the operator.
    pdef : {bool}
        True or False, whether A is known to be positive definite.
    num_candidates : {integer} : default 1
        Number of near-nullspace candidates to generate
    candidate_iters : {integer} : default 5
        Number of smoothing passes/multigrid cycles used at each level of
        the adaptive setup phase
    improvement_iters : {integer} : default 0
        Number of times each candidate is improved
    epsilon : {float} : default 0.1
        Target convergence factor
    max_levels : {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : {integer} : default 10
        Maximum number of variables permitted on the coarse grid.
    prepostsmoother : {string or dict}
        Pre- and post-smoother used in the adaptive method
    strength : ['symmetric', 'classical', 'evolution',
                ('predefined', {'C': csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system.  See smoothed_aggregation_solver(...) documentation.
    aggregate : ['standard', 'lloyd', 'naive',
                 ('predefined', {'AggOp': csr_matrix})]
        Method used to aggregate nodes.  See smoothed_aggregation_solver(...)
        documentation.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used to smooth the tentative prolongator.  See
        smoothed_aggregation_solver(...) documentation.
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.
    eliminate_local : {tuple}
        Length 2 tuple.  If the first entry is True, then candidates are
        eliminated where they are not needed locally, with the second entry
        holding keyword arguments for the local elimination routine.  Given
        the rigid sparse data structures, this does not help much, if at all,
        with complexity.  It's more of a diagnostic utility.
    keep: {bool} : default False
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics.  For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Returns
    -------
    multilevel_solver : multilevel_solver
        Smoothed aggregation solver with adaptively generated candidates
    work : float
        Floating point value representing the "work" required to generate
        the solver.  This value is the total cost of just relaxation,
        relative to the fine grid.  The relaxation method used is assumed
        to be symmetric Gauss-Seidel.

    Notes
    -----

    - Unlike the standard Smoothed Aggregation (SA) method, adaptive SA does
      not require knowledge of near-nullspace candidate vectors.  Instead, an
      adaptive procedure computes one or more candidates 'from scratch'.  This
      approach is useful when no candidates are known or the candidates have
      been invalidated due to changes to matrix A.

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.aggregation import adaptive_sa_solver
    >>> import numpy as np
    >>> A=stencil_grid([[-1,-1,-1],[-1,8.0,-1],[-1,-1,-1]],\
                       (31,31),format='csr')
    >>> [asa,work] = adaptive_sa_solver(A,num_candidates=1)
    >>> residuals=[]
    >>> x=asa.solve(b=np.ones((A.shape[0],)), x0=np.ones((A.shape[0],)),\
                    residuals=residuals)

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf

    """

    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
        except Exception:
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Track work in terms of relaxation
    work = np.zeros((1,))

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Develop initial candidate(s).  Note that any predefined aggregation is
    # preserved.
    if B is None:
        B, aggregate, strength =\
            initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                                max_levels, max_coarse, aggregate,
                                prepostsmoother, smooth, strength, work)
        # Normalize B
        B = (1.0/norm(B, 'inf')) * B
        num_candidates -= 1
    else:
        # Otherwise, use predefined candidates
        num_candidates -= B.shape[1]
        # Generate Aggregation and Strength Operators (the brute force way)
        sa = smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                         presmoother=prepostsmoother,
                                         postsmoother=prepostsmoother,
                                         smooth=smooth,
                                         strength=strength,
                                         max_levels=max_levels,
                                         max_coarse=max_coarse,
                                         aggregate=aggregate,
                                         coarse_solver=coarse_solver,
                                         improve_candidates=None,
                                         keep=True, **kwargs)
        if len(sa.levels) > 1:
            # Set strength-of-connection and aggregation
            aggregate = [('predefined', {'AggOp': sa.levels[i].AggOp.tocsr()})
                         for i in range(len(sa.levels) - 1)]
            strength = [('predefined', {'C': sa.levels[i].C.tocsr()})
                        for i in range(len(sa.levels) - 1)]

    # Develop additional candidates
    for i in range(num_candidates):
        x = general_setup_stage(
            smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        strength=strength,
                                        max_levels=max_levels,
                                        max_coarse=max_coarse,
                                        aggregate=aggregate,
                                        coarse_solver=coarse_solver,
                                        improve_candidates=None,
                                        keep=True, **kwargs),
            symmetry, candidate_iters, prepostsmoother, smooth,
            eliminate_local, coarse_solver, work)

        # Normalize x and add to candidate list
        x = x/norm(x, 'inf')
        if np.isinf(x[0]) or np.isnan(x[0]):
            raise ValueError('Adaptive candidate is all 0.')
        B = np.hstack((B, x.reshape(-1, 1)))

    # Improve candidates
    if B.shape[1] > 1 and improvement_iters > 0:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        for i in range(improvement_iters):
            for j in range(B.shape[1]):
                # Run a V-cycle built on everything except candidate j, while
                # using candidate j as the initial guess.  The columns of B
                # are rotated so that column 0 is always the current candidate.
                x0 = B[:, 0]
                B = B[:, 1:]
                sa_temp =\
                    smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                                presmoother=prepostsmoother,
                                                postsmoother=prepostsmoother,
                                                smooth=smooth,
                                                strength=strength,
                                                max_levels=max_levels,
                                                max_coarse=max_coarse,
                                                aggregate=aggregate,
                                                coarse_solver=coarse_solver,
                                                improve_candidates=None,
                                                keep=True, **kwargs)
                x = sa_temp.solve(b, x0=x0,
                                  tol=float(np.finfo(float).tiny),
                                  maxiter=candidate_iters, cycle='V')
                work[:] += 2 * sa_temp.operator_complexity() *\
                    sa_temp.levels[0].A.nnz * candidate_iters

                # Apply local elimination
                elim, elim_kwargs = unpack_arg(eliminate_local)
                if elim is True:
                    x = x/norm(x, 'inf')
                    eliminate_local_candidates(x, sa_temp.levels[0].AggOp, A,
                                               sa_temp.levels[0].T,
                                               **elim_kwargs)

                # Normalize x and add to candidate list
                x = x/norm(x, 'inf')
                if np.isinf(x[0]) or np.isnan(x[0]):
                    raise ValueError('Adaptive candidate is all 0.')
                B = np.hstack((B, x.reshape(-1, 1)))

    elif improvement_iters > 0:
        # Special case for improving a single candidate
        max_levels = len(aggregate) + 1
        max_coarse = 0
        for i in range(improvement_iters):
            B, aggregate, strength =\
                initial_setup_stage(A, symmetry, pdef, candidate_iters,
                                    epsilon, max_levels, max_coarse,
                                    aggregate, prepostsmoother, smooth,
                                    strength, work, B=B)
            # Normalize B
            B = (1.0/norm(B, 'inf'))*B

    # Return the solver and the work estimate.  Solver parameters are passed
    # through so that they are stored in the final solver hierarchy and used
    # to estimate complexity.
    return [smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        strength=strength,
                                        max_levels=max_levels,
                                        max_coarse=max_coarse,
                                        aggregate=aggregate,
                                        coarse_solver=coarse_solver,
                                        improve_candidates=None,
                                        keep=keep,
                                        **kwargs),
            work[0]/A.nnz]
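
Since the routine returns a two-element list, callers unpack both the solver and the relative setup work. A short usage sketch follows; the Poisson test matrix, candidate counts, and tolerance are illustrative only.

import numpy as np
from pyamg.gallery import poisson
from pyamg.aggregation import adaptive_sa_solver

A = poisson((50, 50), format='csr')      # 2D Poisson test problem
asa, work = adaptive_sa_solver(A, num_candidates=2, candidate_iters=5)

b = np.random.rand(A.shape[0])
residuals = []
x = asa.solve(b, tol=1e-8, residuals=residuals)
print('setup work (relative to fine-grid nnz):', work)
print('levels:', len(asa.levels), ' final residual:', residuals[-1])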