Code example #1
File: cr.py  Project: GaZ3ll3/pyamg
def _CRsweep(A, Findex, Cindex, nu, thetacr, method):

    n = A.shape[0]    # problem size
    numax = nu
    z = np.zeros((n,))
    e = np.ones((n,))
    e[Cindex] = 0.0
    enorm = norm(e)
    rhok = 1

    for it in range(1, numax+1):

        if method == 'habituated':
            gauss_seidel(A, e, z, iterations=1)
            e[Cindex] = 0.0
        elif method == 'concurrent':
            gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
        else:
            raise NotImplementedError('method not recognized: need '
                                      'habituated or concurrent')

        enorm_old = enorm
        enorm = norm(e)
        rhok_old = rhok
        rhok = enorm / enorm_old

        # criteria 1
        if (abs(rhok - rhok_old) / rhok < 0.1) and (it >= nu):
            return rhok, e

        # criteria 2
        if rhok < 0.1 * thetacr:
            return rhok, e
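The helper above measures how quickly relaxation damps error at the candidate F-points while the C-points are held at zero; the returned rhok is the observed convergence factor. A minimal driver sketch (not part of pyamg; it assumes the _CRsweep definition above together with its imports of np, norm and gauss_seidel, and uses arbitrary nu/thetacr values):

import numpy as np
from pyamg.gallery import poisson

A = poisson((50,), format='csr')          # 1-D Poisson, 50 unknowns
Cindex = np.arange(0, A.shape[0], 2)      # even indices -> tentative C-points
Findex = np.arange(1, A.shape[0], 2)      # odd indices  -> tentative F-points

result = _CRsweep(A, Findex, Cindex, nu=4, thetacr=0.7, method='habituated')
if result is not None:                    # the loop can fall through without
    rhok, e = result                      # meeting either stopping criterion
    print('CR convergence factor estimate:', rhok)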
Code example #2
def _CRsweep(A, Findex, Cindex, nu, thetacr, method):

    n = A.shape[0]  # problem size
    numax = nu
    z = np.zeros((n, ))
    e = np.ones((n, ))
    e[Cindex] = 0.0
    enorm = norm(e)
    rhok = 1

    for it in range(1, numax + 1):

        if method == 'habituated':
            gauss_seidel(A, e, z, iterations=1)
            e[Cindex] = 0.0
        elif method == 'concurrent':
            gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
        else:
            raise NotImplementedError('method not recognized: need habituated or concurrent')

        enorm_old = enorm
        enorm = norm(e)
        rhok_old = rhok
        rhok = enorm / enorm_old

        # criteria 1
        if (abs(rhok - rhok_old) / rhok < 0.1) and (it >= nu):
            return rhok, e

        # criteria 2
        if rhok < 0.1 * thetacr:
            return rhok, e
Code example #3
    def smoother(self, x, b, nu, tol=1.e-10):
        if self.withscipy:
            gauss_seidel(self.A, x, b, iterations=nu)
            return x
        else:
#            if self.withPETSc and importPETSc:
            if False:
                _b = PETSc.Vec().createWithArray(b, comm=PETSc.COMM_SELF)
                _x = PETSc.Vec().createWithArray(np.zeros_like(b), comm=PETSc.COMM_SELF)

                self.ksp_smt.rtol = tol
                self.ksp_smt.max_it = nu
    #            self.ksp_smt.setConvergenceHistory()
                self.ksp_smt.solve(_b, _x)
                return _x.getArray()
            else:
                gauss_seidel(self._A, x, b, iterations=nu) # TODO: to remove
                return x
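For orientation: the gauss_seidel routine used here (apparently pyamg's) relaxes x in place and returns None, which is why the wrapper above returns x explicitly. A minimal standalone sketch of the non-PETSc path (assuming pyamg; the matrix size and sweep count are arbitrary):

import numpy as np
from pyamg.gallery import poisson
from pyamg.relaxation.relaxation import gauss_seidel

A = poisson((100,), format='csr')
b = np.ones(A.shape[0])
x = np.zeros(A.shape[0])
gauss_seidel(A, x, b, iterations=5)   # updates x in place, returns None
print(np.linalg.norm(b - A @ x))      # residual after five sweeps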
Code example #4
File: adaptive.py  Project: GaZ3ll3/pyamg
 def relax(A, x):
     fn, kwargs = unpack_arg(prepostsmoother)
     if fn == 'gauss_seidel':
         gauss_seidel(A, x, numpy.zeros_like(x),
                      iterations=candidate_iters, sweep='symmetric')
     elif fn == 'gauss_seidel_nr':
         gauss_seidel_nr(A, x, numpy.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
     elif fn == 'gauss_seidel_ne':
         gauss_seidel_ne(A, x, numpy.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
     elif fn == 'jacobi':
         jacobi(A, x, numpy.zeros_like(x), iterations=1,
                omega=1.0 / rho_D_inv_A(A))
     elif fn == 'richardson':
         polynomial(A, x, numpy.zeros_like(x), iterations=1,
                    coefficients=[1.0/approximate_spectral_radius(A)])
     elif fn == 'gmres':
         x[:] = (gmres(A, numpy.zeros_like(x), x0=x,
                 maxiter=candidate_iters)[0]).reshape(x.shape)
     else:
         raise TypeError('Unrecognized smoother')
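Each branch above relaxes the candidate vector x against a zero right-hand side, driving x toward an algebraically smooth error that the adaptive setup can use as a candidate. A self-contained sketch of just the 'gauss_seidel' branch (assuming pyamg; candidate_iters is an arbitrary illustrative value):

import numpy as np
from pyamg.gallery import poisson
from pyamg.relaxation.relaxation import gauss_seidel

A = poisson((30, 30), format='csr')
candidate_iters = 5                       # assumed value for illustration
x = np.random.rand(A.shape[0])            # random initial candidate
gauss_seidel(A, x, np.zeros_like(x),
             iterations=candidate_iters, sweep='symmetric')
# x now approximates a smooth error vector for A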
Code example #5
File: adaptive.py  Project: gaussWu/pyamg
 def relax(A, x):
     fn, kwargs = unpack_arg(prepostsmoother)
     if fn == 'gauss_seidel':
         gauss_seidel(A, x, numpy.zeros_like(x),
                      iterations=candidate_iters, sweep='symmetric')
     elif fn == 'gauss_seidel_nr':
         gauss_seidel_nr(A, x, numpy.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
     elif fn == 'gauss_seidel_ne':
         gauss_seidel_ne(A, x, numpy.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
     elif fn == 'jacobi':
         jacobi(A, x, numpy.zeros_like(x), iterations=1,
                omega=1.0 / rho_D_inv_A(A))
     elif fn == 'richardson':
         polynomial(A, x, numpy.zeros_like(x), iterations=1,
                    coefficients=[1.0/approximate_spectral_radius(A)])
     elif fn == 'gmres':
         x[:] = (gmres(A, numpy.zeros_like(x), x0=x,
                 maxiter=candidate_iters)[0]).reshape(x.shape)
     else:
         raise TypeError('Unrecognized smoother')
Code example #6
File: cr.py  Project: GaZ3ll3/pyamg
def CR(S, method='habituated', maxiter=20):
    """Use Compatible Relaxation to compute a C/F splitting

    Parameters
    ----------
    S : csr_matrix
        sparse matrix (n x n) usually matrix A of Ax=b
    method : {'habituated','concurrent'}
        Method used during relaxation:
            - concurrent: GS relaxation on F-points, leaving e_c = 0
            - habituated: full relaxation, setting e_c = 0
    maxiter : int
        maximum number of outer iterations (lambda)

    Returns
    -------
    splitting : array
        C/F list of 1's (coarse pt) and 0's (fine pt) (n x 1)

    References
    ----------
    .. [1] Livne, O.E., "Coarsening by compatible relaxation."
       Numer. Linear Algebra Appl. 11, No. 2-3, 205-227 (2004).

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical.cr import CR
    >>> A = poisson((20,20),format='csr')
    >>> splitting = CR(A)

    """
    # parameters (paper notation)
    ntests = 3      # (nu) number of random tests to do per iteration
    nrelax = 4      # (eta) number of relaxation sweeps per test

    smagic = 1.0    # (s) parameter in [1,5] to account for fill-in
    gamma = 1.5     # (gamma) cycle index.  use 1.5 for 2d
    G = 30          # (G) number of equivalence classes (# of bins)
    tdepth = 1      # (t) drop depth on parse of L bins
    delta = 0       # (delta) drop threshold on parse of L bins
    alphai = 0.25   # (alpha_inc) quota increase

    # initializations
    alpha = 0.0     # coarsening ratio, quota
    beta = np.inf      # quality criterion
    beta1 = np.inf     # quality criterion, older
    beta2 = np.inf     # quality criterion, oldest
    n = S.shape[0]    # problem size
    nC = 0          # number of current Coarse points
    rhs = np.zeros((n, 1))  # rhs for Ae=0

    if not isspmatrix(S):
        raise TypeError('expecting sparse matrix')

    S = binormalize(S)

    splitting = np.zeros((S.shape[0], 1), dtype='intc')

    # outer iterations ---------------
    for m in range(0, maxiter):

        mu = 0.0  # convergence rate
        E = np.zeros((n, 1))  # slowness measure

        # random iterations ---------------
        for k in range(0, ntests):

            e = 0.5*(1 + scipy.rand(n, 1))
            e[splitting > 0] = 0

            enorm = norm(e)

            # relaxation iterations ---------------
            for l in range(0, nrelax):

                if method == 'habituated':
                    gauss_seidel(S, e, np.zeros((n, 1)), iterations=1)
                    e[splitting > 0] = 0
                elif method == 'concurrent':
                    raise NotImplementedError('not implemented: need an '
                                              'F-smoother')
                else:
                    raise NotImplementedError('method not recognized: need '
                                              'habituated or concurrent')

                enorm_old = enorm
                enorm = norm(e)

                if enorm <= 1e-14:
                    # break out of loops
                    ntests = k
                    nrelax = l
                    maxiter = m
            # end relax

            # check slowness
            E = np.where(np.abs(e) > E, np.abs(e), E)

            # update convergence rate
            mu = mu + enorm/enorm_old
        # end random tests
        mu = mu/ntests

        # work
        alpha = float(nC)/n

        W = (1 + (smagic-1)*gamma*alpha)/(1-gamma*alpha)

        # quality criterion
        beta2 = beta1
        beta1 = beta
        beta = np.power(max([mu, 0.1]), 1.0 / W)

        # check if we're doing well
        if (beta > beta1 and beta1 > beta2) or \
           m == (maxiter-1) or max(E) < 1e-13:
            return splitting.ravel()

        # now add points
        #
        # update limit on additions to splitting (C)
        if alpha < 1e-13:
            alpha = 0.25
        else:
            alpha = (1-alphai) * alpha + alphai * (1/gamma)

        nCmax = np.ceil(alpha * n)

        L = np.ceil(G * E / E.max()).ravel()

        binid = G

        # add whole bins (and t-depth nodes) at a time
        u = np.zeros((n, 1))
        # TODO This loop may never halt...
        #      Perhaps loop over nC < nCmax and binid > 0 ?
        while nC < nCmax:
            if delta > 0:
                raise NotImplementedError
            if tdepth != 1:
                raise NotImplementedError

            (roots,) = np.where(L == binid)

            for root in roots:
                if L[root] >= 0:
                    cols = S[root, :].indices
                    splitting[root] = 1    # add roots
                    nC += 1
                    L[cols] = -1
            binid -= 1

            #L[troots] = -1          # mark t-rings visited
            #u[:]=0.0
            #u[roots] = 1.0
            #for depth in range(0,tdepth):
            #    u = np.abs(S) * u
            #(troots,tmp) = np.where(u>0)

    return splitting.ravel()
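The docstring's example can be extended slightly to inspect the returned splitting; a short sketch assuming pyamg (it follows the Examples section above, so the interface may differ in newer releases):

from pyamg.gallery import poisson
from pyamg.classical.cr import CR

A = poisson((20, 20), format='csr')
splitting = CR(A)                     # 1 = coarse point, 0 = fine point
print(splitting.shape)                # (400,) for the 20x20 grid
print('coarse fraction:', splitting.sum() / float(A.shape[0]))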
Code example #7
def CR(S, method='habituated', maxiter=20):
    """Use Compatible Relaxation to compute a C/F splitting 

    Parameters
    ----------
    S : csr_matrix
        sparse matrix (n x n) usually matrix A of Ax=b
    method : {'habituated','concurrent'}
        Method used during relaxation:
            - concurrent: GS relaxation on F-points, leaving e_c = 0
            - habituated: full relaxation, setting e_c = 0
    maxiter : int
        maximum number of outer iterations (lambda)

    Returns
    -------
    splitting : array
        C/F list of 1's (coarse pt) and 0's (fine pt) (n x 1)

    References
    ----------
    .. [1] Livne, O.E., "Coarsening by compatible relaxation."
       Numer. Linear Algebra Appl. 11, No. 2-3, 205-227 (2004).

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical.cr import CR
    >>> A = poisson((20,20),format='csr')
    >>> splitting = CR(A)

    """
    # parameters (paper notation)
    ntests = 3  # (nu) number of random tests to do per iteration
    nrelax = 4  # (eta) number of relaxation sweeps per test

    smagic = 1.0  # (s) parameter in [1,5] to account for fill-in
    gamma = 1.5  # (gamma) cycle index.  use 1.5 for 2d
    G = 30  # (G) number of equivalence classes (# of bins)
    tdepth = 1  # (t) drop depth on parse of L bins
    delta = 0  # (delta) drop threshold on parse of L bins
    alphai = 0.25  # (alpha_inc) quota increase

    # initializations
    alpha = 0.0  # coarsening ratio, quota
    beta = np.inf  # quality criterion
    beta1 = np.inf  # quality criterion, older
    beta2 = np.inf  # quality criterion, oldest
    n = S.shape[0]  # problem size
    nC = 0  # number of current Coarse points
    rhs = np.zeros((n, 1))
    # rhs for Ae=0

    if not isspmatrix(S):
        raise TypeError('expecting sparse matrix')

    S = binormalize(S)

    splitting = np.zeros((S.shape[0], 1), dtype='intc')

    # outer iterations ---------------
    for m in range(0, maxiter):

        mu = 0.0  # convergence rate
        E = np.zeros((n, 1))  # slowness measure

        # random iterations ---------------
        for k in range(0, ntests):

            e = 0.5 * (1 + scipy.rand(n, 1))
            e[splitting > 0] = 0

            enorm = norm(e)

            # relaxation iterations ---------------
            for l in range(0, nrelax):

                if method == 'habituated':
                    gauss_seidel(S, e, np.zeros((n, 1)), iterations=1)
                    e[splitting > 0] = 0
                elif method == 'concurrent':
                    raise NotImplementedError('not implemented: need an F-smoother')
                else:
                    raise NotImplementedError('method not recognized: need habituated or concurrent')

                enorm_old = enorm
                enorm = norm(e)

                if enorm <= 1e-14:
                    # break out of loops
                    ntests = k
                    nrelax = l
                    maxiter = m
            # end relax

            # check slowness
            E = np.where(np.abs(e) > E, np.abs(e), E)

            # update convergence rate
            mu = mu + enorm / enorm_old
        # end random tests
        mu = mu / ntests

        # work
        alpha = float(nC) / n

        W = (1 + (smagic - 1) * gamma * alpha) / (1 - gamma * alpha)

        # quality criterion
        beta2 = beta1
        beta1 = beta
        beta = np.power(max([mu, 0.1]), 1.0 / W)

        # check if we're doing well
        if (beta > beta1
                and beta1 > beta2) or m == (maxiter - 1) or max(E) < 1e-13:
            return splitting.ravel()

        # now add points
        #
        # update limit on additions to splitting (C)
        if alpha < 1e-13:
            alpha = 0.25
        else:
            alpha = (1 - alphai) * alpha + alphai * (1 / gamma)

        nCmax = np.ceil(alpha * n)

        L = np.ceil(G * E / E.max()).ravel()

        binid = G

        # add whole bins (and t-depth nodes) at a time
        u = np.zeros((n, 1))
        # TODO This loop may never halt...
        #      Perhaps loop over nC < nCmax and binid > 0 ?
        while nC < nCmax:
            if delta > 0:
                raise NotImplementedError
            if tdepth != 1:
                raise NotImplementedError

            (roots, ) = np.where(L == binid)

            for root in roots:
                if L[root] >= 0:
                    cols = S[root, :].indices
                    splitting[root] = 1  # add roots
                    nC += 1
                    L[cols] = -1
            binid -= 1

            #L[troots] = -1          # mark t-rings visited
            #u[:]=0.0
            #u[roots] = 1.0
            #for depth in range(0,tdepth):
            #    u = np.abs(S) * u
            #(troots,tmp) = np.where(u>0)

    return splitting.ravel()
Code example #8
def gauss_seidel_iter(A, x, b, iterations=1):
    gauss_seidel(A, x, b, iterations=iterations)
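This last wrapper simply forwards to gauss_seidel, so the relaxed vector comes back through its x argument. A short usage sketch, assuming pyamg and the wrapper defined above (right-hand side and sweep count are arbitrary):

import numpy as np
from pyamg.gallery import poisson

A = poisson((50,), format='csr')
b = np.zeros(A.shape[0])
x = np.random.rand(A.shape[0])             # random initial guess
gauss_seidel_iter(A, x, b, iterations=3)   # relaxes x in place toward the zero solution
print(np.linalg.norm(x))                   # the norm shrinks as sweeps accumulate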