Example no. 1
    # Excerpted from a solver wrapper: `fn` and `kwargs` are captured from the
    # enclosing factory scope (not shown in this snippet).
    def solve(self, A, b):
        # Default the convergence tolerance based on the matrix dtype
        if 'tol' not in kwargs:
            kwargs['tol'] = set_tol(A.dtype)

        return fn(A, b, **kwargs)[0]
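Since fn and kwargs are unbound in the excerpt above, here is a minimal sketch of how such a wrapper could be assembled. The factory name make_solver and the choice of pyamg.krylov.cg as the captured fn are assumptions, not part of the original code.

from pyamg.krylov import cg            # assumed choice for the captured `fn`
from pyamg.util.utils import set_tol


def make_solver(fn=cg, **kwargs):      # hypothetical factory capturing fn/kwargs
    class Solver:
        def solve(self, A, b):
            # default the convergence tolerance based on the matrix dtype
            if 'tol' not in kwargs:
                kwargs['tol'] = set_tol(A.dtype)
            # fn returns (solution, flag); keep only the solution
            return fn(A, b, **kwargs)[0]

    return Solver()

Usage would then be along the lines of make_solver().solve(A, b) for a sparse matrix A and right-hand side b.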
Example no. 2
def pinv_array(a, cond=None):
    """Calculate the Moore-Penrose pseudo inverse of each block of the three dimensional array a.

    Parameters
    ----------
    a : array
        Dense array of size (n, m, m)
    cond : float
        Used by gelss to filter numerically zero singular values.
        If None, a suitable value is chosen for you.

    Returns
    -------
    Nothing, a is modified in place so that a[k] holds the pseudoinverse
    of that block.

    Notes
    -----
    By using LAPACK wrappers, this can be much faster for large n than
    calling pinv2 directly.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.util.linalg import pinv_array
    >>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
    >>> ac = a.copy()
    >>> # each block of a is inverted in-place
    >>> pinv_array(a)

    """
    import numpy as np
    from scipy.linalg import get_lapack_funcs
    from scipy.linalg.lapack import _compute_lwork
    from pyamg.util.utils import set_tol

    n = a.shape[0]
    m = a.shape[1]

    if m == 1:
        # Pseudo-inverse of 1 x 1 matrices is trivial
        zero_entries = (a == 0.0).nonzero()[0]
        a[zero_entries] = 1.0
        a[:] = 1.0 / a
        a[zero_entries] = 0.0
        del zero_entries

    else:
        # The block size is greater than 1

        # Create necessary arrays and function pointers for calculating pinv
        gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
                                              (np.ones((1, ), dtype=a.dtype)))
        RHS = np.eye(m, dtype=a.dtype)
        lwork = _compute_lwork(gelss_lwork, m, m, m)

        # Choose tolerance for which singular values are zero in *gelss below
        if cond is None:
            cond = set_tol(a.dtype)

        # Invert each block of a
        for kk in range(n):
            gelssoutput = gelss(a[kk],
                                RHS,
                                cond=cond,
                                lwork=lwork,
                                overwrite_a=True,
                                overwrite_b=False)
            a[kk] = gelssoutput[1]
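A small sanity check one might run on pinv_array, comparing against NumPy's pseudoinverse applied block by block (this check is not part of pyamg itself):

import numpy as np
from pyamg.util.linalg import pinv_array

a = np.array([[[1., 2.], [1., 1.]],
              [[1., 1.], [3., 3.]]])        # second block is singular
expected = np.array([np.linalg.pinv(block) for block in a])

pinv_array(a)                               # overwrites a in place
print(np.allclose(a, expected))             # expected output: True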
Example no. 3
def reference_evolution_soc(A, B, epsilon=4.0, k=2, proj_type="l2"):
    """
    All python reference implementation for Evolution Strength of Connection

    --> If doing imaginary test, both A and B should be imaginary type upon
    entry

    --> This does the "unsymmetrized" version of the ode measure
    """

    import numpy as np
    import scipy.linalg as sla
    from scipy import sparse
    from pyamg.util.utils import set_tol, scale_rows
    from pyamg.util.linalg import approximate_spectral_radius

    # number of PDEs per point is defined implicitly by block size
    csrflag = sparse.isspmatrix_csr(A)
    if csrflag:
        numPDEs = 1
    else:
        numPDEs = A.blocksize[0]
        A = A.tocsr()

    # Preliminaries
    near_zero = np.finfo(float).eps
    sqrt_near_zero = np.sqrt(np.sqrt(near_zero))
    Bmat = np.array(B)
    A.eliminate_zeros()
    A.sort_indices()
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A; 1/rho is used as the time step size for the ODE
    D = A.diagonal()
    Dinv = np.zeros_like(D)
    mask = (D != 0.0)
    Dinv[mask] = 1.0 / D[mask]
    Dinv[D == 0] = 1.0
    Dinv_A = scale_rows(A, Dinv, copy=True)
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate (Atilde^k) naively
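    # S = I - (1/rho(Dinv*A)) * Dinv*A is a single weighted-Jacobi error
    # propagator; the loop below forms Atilde = S^k, i.e. k smoothing steps
    # applied to the identity (each column evolves a delta function).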
    S = (sparse.eye(dimen, dimen, format="csr") - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = sparse.eye(dimen, dimen, format="csr")
    for i in range(k):
        Atilde = S * Atilde

    # Strength Info should be row-based, so transpose Atilde
    Atilde = Atilde.T.tocsr()

    # Construct and apply a sparsity mask for Atilde that restricts Atilde^T to
    # the nonzero pattern of A, with the added constraint that row i of
    # Atilde^T retains only the nonzeros that are also in the same PDE as i.

    mask = A.copy()

    # Only consider strength at dofs from your PDE.  Use mask to enforce this
    # by zeroing out all entries in Atilde that aren't from your PDE.
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # Apply mask to Atilde, zeros in mask have already been eliminated at start
    # of routine.
    mask.data[:] = 1.0
    Atilde = Atilde.multiply(mask)
    Atilde.eliminate_zeros()
    Atilde.sort_indices()
    del mask

    # Calculate strength based on the constrained min problem:
    #   minimize || z - B*x ||  subject to  (B*x)_i = z_i,
    # where z is row i of Atilde and B holds the near-nullspace candidates.
    LHS = np.zeros((NullDim + 1, NullDim + 1), dtype=A.dtype)
    RHS = np.zeros((NullDim + 1, ), dtype=A.dtype)

    # Choose tolerance for dropping "numerically zero" values later
    tol = set_tol(Atilde.dtype)

    for i in range(dimen):

        # Get rowptrs and col indices from Atilde
        rowstart = Atilde.indptr[i]
        rowend = Atilde.indptr[i + 1]
        length = rowend - rowstart
        colindx = Atilde.indices[rowstart:rowend]

        # Local diagonal of A is used for scale invariant min problem
        D_A = np.eye(length, dtype=A.dtype)
        if proj_type == "D_A":
            for j in range(length):
                D_A[j, j] = D[colindx[j]]

        # Find row i's position in colindx, matrix must have sorted column
        # indices.
        iInRow = colindx.searchsorted(i)

        if length <= NullDim:
            # Do nothing, because the number of nullspace vectors will
            # be able to perfectly approximate this row of Atilde.
            Atilde.data[rowstart:rowend] = 1.0
        else:
            # Grab out what we want from Atilde and B.  Put into zi, Bi
            zi = Atilde.data[rowstart:rowend]

            Bi = Bmat[colindx, :]

            # Construct constrained min problem
            CC = D_A[iInRow, iInRow]
            LHS[0:NullDim, 0:NullDim] = 2.0 * Bi.conj().T.dot(D_A.dot(Bi))
            LHS[0:NullDim, NullDim] = CC * (Bi[iInRow, :].conj().T)
            LHS[NullDim, 0:NullDim] = Bi[iInRow, :]
            RHS[0:NullDim] = 2.0 * Bi.conj().T.dot(D_A.dot(zi))
            RHS[NullDim] = zi[iInRow]

            # Calc Soln to Min Problem
            x = sla.pinv(LHS).dot(RHS)

            # Calculate best constrained approximation to zi with span(Bi), and
            # filter out "numerically" zero values.  This is important because
            # we look only at the sign of values below when calculating angle.
            zihat = Bi.dot(x[:-1])
            tol_i = np.max(np.abs(zihat)) * tol
            zihat.real[np.abs(zihat.real) < tol_i] = 0.0
            if np.iscomplexobj(zihat):
                zihat.imag[np.abs(zihat.imag) < tol_i] = 0.0

            # if angle in the complex plane between individual entries is
            # greater than 90 degrees, then weak.  We can just look at the dot
            # product to determine if angle is greater than 90 degrees.
            angle = np.real(np.ravel(zihat))*np.real(np.ravel(zi)) +\
                np.imag(np.ravel(zihat))*np.imag(np.ravel(zi))
            angle = angle < 0.0
            angle = np.array(angle, dtype=bool)

            # Calculate approximation ratio
            zi = zihat / zi

            # If the ratio is small, then weak connection
            zi[np.abs(zi) <= 1e-4] = 1e100

            # If angle is greater than 90 degrees, then weak connection
            zi[angle] = 1e100

            # Calculate Relative Approximation Error
            zi = np.abs(1.0 - zi)

            # important to make "perfect" connections explicitly nonzero
            zi[zi < sqrt_near_zero] = 1e-4

            # Calculate and apply drop-tol.  Ignore diagonal by making it very
            # large
            zi[iInRow] = 1e5
            drop_tol = np.min(zi) * epsilon
            zi[zi > drop_tol] = 0.0
            Atilde.data[rowstart:rowend] = np.ravel(zi)

    # Clean up, and return Atilde
    Atilde.eliminate_zeros()
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Set diagonal to 1.0, as each point is strongly connected to itself:
    # adding Id with data (1 - diag(Atilde)) makes the resulting diagonal exactly 1.
    Id = sparse.eye(dimen, dimen, format="csr")
    Id.data -= Atilde.diagonal()
    Atilde = Atilde + Id

    # If the input was BSR (converted to CSR above), return an amalgamated
    # matrix whose entries are the minimum nonzero of each block of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        # Rebuild a CSR matrix from the per-block minima, reusing the BSR
        # indices and indptr
        At = []
        for i in range(Atilde.indices.shape[0]):
            Atmin = Atilde.data[i, :, :][Atilde.data[i, :, :].nonzero()]
            At.append(Atmin.min())

        Atilde = sparse.csr_matrix(
            (np.array(At), Atilde.indices, Atilde.indptr),
            shape=(int(Atilde.shape[0] / numPDEs),
                   int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data

    # Scale Atilde by the largest magnitude entry in each row
    largest_row_entry = np.zeros((Atilde.shape[0], ), dtype=Atilde.dtype)
    for i in range(Atilde.shape[0]):
        for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
            val = abs(Atilde.data[j])
            if val > largest_row_entry[i]:
                largest_row_entry[i] = val

    largest_row_entry[largest_row_entry != 0] =\
        1.0 / largest_row_entry[largest_row_entry != 0]
    Atilde = Atilde.tocsr()
    Atilde = scale_rows(Atilde, largest_row_entry, copy=True)

    return Atilde
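A possible way to exercise this reference routine on a small test problem, assuming pyamg's gallery (the grid size, epsilon, and candidate vector are arbitrary choices here):

import numpy as np
from pyamg.gallery import poisson

A = poisson((10, 10), format='csr')     # 2D Poisson test matrix
B = np.ones((A.shape[0], 1))            # constant near-nullspace candidate

S = reference_evolution_soc(A, B, epsilon=4.0, k=2, proj_type="l2")
print(S.shape, S.nnz)                   # strength-of-connection matrix

Being a reference implementation, its output is presumably meant to be compared against pyamg's production evolution_strength_of_connection in the test suite.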
Example no. 4
def _approximate_eigenvalues(A,
                             tol,
                             maxiter,
                             symmetric=None,
                             initial_guess=None):
    """Apprixmate eigenvalues.

    Used by approximate_spectral_radius and condest.

    Returns [W, E, H, V, breakdown_flag], where W and E are the eigenvectors
    and eigenvalues of the Hessenberg matrix H, respectively, and V is the
    Krylov space.  breakdown_flag denotes whether Lanczos/Arnoldi suffered
    breakdown.  E therefore contains the approximate eigenvalues of A.

    To obtain approximate eigenvectors of A, compute V*W.
    """
    import numpy as np
    from numpy.linalg import norm  # 2-norm; pyamg's own norm helper is equivalent here
    from scipy.sparse.linalg import aslinearoperator
    from pyamg.util.utils import set_tol

    A = aslinearoperator(A)  # A could be dense or sparse, or something weird

    # Choose tolerance for deciding if break-down has occurred
    breakdown = set_tol(A.dtype)
    breakdown_flag = False

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    maxiter = min(A.shape[0], maxiter)

    if initial_guess is None:
        v0 = np.random.rand(A.shape[1], 1)
        if A.dtype == complex:
            v0 = v0 + 1.0j * np.random.rand(A.shape[1], 1)
    else:
        v0 = initial_guess

    v0 /= norm(v0)

    # Important to type H based on v0, so that a real nonsymmetric matrix can
    # have a complex initial guess for its Arnoldi Krylov space
    H = np.zeros((maxiter + 1, maxiter),
                 dtype=np.promote_types(v0.dtype, A.dtype))

    V = [v0]

    beta = 0.0
    for j in range(maxiter):
        w = A * V[-1]

        if symmetric:
            if j >= 1:
                H[j - 1, j] = beta
                w -= beta * V[-2]

            alpha = np.dot(np.conjugate(w.ravel()), V[-1].ravel())
            H[j, j] = alpha
            w -= alpha * V[-1]  # axpy(V[-1],w,-alpha)

            beta = norm(w)
            H[j + 1, j] = beta

            if (H[j + 1, j] < breakdown):
                breakdown_flag = True
                break

            w /= beta

            V.append(w)
            V = V[-2:]  # retain only last two vectors

        else:
            # orthogonalize against Vs
            for i, v in enumerate(V):
                H[i, j] = np.dot(np.conjugate(v.ravel()), w.ravel())
                w = w - H[i, j] * v

            H[j + 1, j] = norm(w)

            if (H[j + 1, j] < breakdown):
                breakdown_flag = True
                if H[j + 1, j] != 0.0:
                    w = w / H[j + 1, j]
                V.append(w)
                break

            w = w / H[j + 1, j]
            V.append(w)

            # if upper 2x2 block of Hessenberg matrix H is almost symmetric,
            # and the user has not explicitly specified symmetric=False,
            # then switch to symmetric Lanczos algorithm
            # if symmetric is not False and j == 1:
            #    if abs(H[1,0] - H[0,1]) < 1e-12:
            #        #print "using symmetric mode"
            #        symmetric = True
            #        V = V[1:]
            #        H[1,0] = H[0,1]
            #        beta = H[2,1]

    # print "Approximated spectral radius in %d iterations" % (j + 1)

    from scipy.linalg import eig

    Eigs, Vects = eig(H[:j + 1, :j + 1], left=False, right=True)

    return (Vects, Eigs, H, V, breakdown_flag)
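A hedged usage sketch, assuming the function above and its imports are in scope; the test matrix, maxiter, and the symmetric=True (Lanczos) setting are choices made here, not prescribed by the code:

from pyamg.gallery import poisson

A = poisson((50,), format='csr')        # 1D Poisson matrix, SPD
W, E, H, V, breakdown = _approximate_eigenvalues(A, tol=1e-8, maxiter=15,
                                                 symmetric=True)
print(max(abs(E)))                      # approximates the spectral radius of A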
Example no. 5
def schwarz_parameters(A,
                       subdomain=None,
                       subdomain_ptr=None,
                       inv_subblock=None,
                       inv_subblock_ptr=None):
    """Set Schwarz parameters.

    Helper function for setting up Schwarz relaxation.  This function avoids
    recomputing the subdomains and block inverses many times, e.g., it avoids a
    costly double computation when setting up pre- and post-smoothing with
    Schwarz.

    Parameters
    ----------
    A : csr_matrix

    Returns
    -------
    A.schwarz_parameters[0] is subdomain
    A.schwarz_parameters[1] is subdomain_ptr
    A.schwarz_parameters[2] is inv_subblock
    A.schwarz_parameters[3] is inv_subblock_ptr

    """
    import numpy as np
    import scipy.linalg as la
    from pyamg import amg_core
    from pyamg.util.utils import set_tol

    # Check if A has a pre-existing set of Schwarz parameters
    if hasattr(A, 'schwarz_parameters'):
        if subdomain is not None and subdomain_ptr is not None:
            # check that the existing parameters correspond to the same
            # subdomains
            if np.array(A.schwarz_parameters[0] == subdomain).all() and \
               np.array(A.schwarz_parameters[1] == subdomain_ptr).all():
                return A.schwarz_parameters
        else:
            return A.schwarz_parameters

    # Default is to use the overlapping regions defined by A's sparsity pattern
    if subdomain is None or subdomain_ptr is None:
        subdomain_ptr = A.indptr.copy()
        subdomain = A.indices.copy()

    # Extract each subdomain's block from the matrix
    if inv_subblock is None or inv_subblock_ptr is None:
        inv_subblock_ptr = np.zeros(subdomain_ptr.shape, dtype=A.indices.dtype)
        blocksize = (subdomain_ptr[1:] - subdomain_ptr[:-1])
        inv_subblock_ptr[1:] = np.cumsum(blocksize * blocksize)

        # Extract each block column from A
        inv_subblock = np.zeros((inv_subblock_ptr[-1], ), dtype=A.dtype)
        amg_core.extract_subblocks(A.indptr, A.indices, A.data, inv_subblock,
                                   inv_subblock_ptr, subdomain, subdomain_ptr,
                                   int(subdomain_ptr.shape[0] - 1), A.shape[0])
        # Choose tolerance for which singular values are zero in *gelss below
        cond = set_tol(A.dtype)

        # Invert each block column
        my_pinv, = la.get_lapack_funcs(['gelss'], (np.ones(
            (1, ), dtype=A.dtype)))
        for i in range(subdomain_ptr.shape[0] - 1):
            m = blocksize[i]
            rhs = np.eye(m, m, dtype=A.dtype)
            j0 = inv_subblock_ptr[i]
            j1 = inv_subblock_ptr[i + 1]
            gelssoutput = my_pinv(inv_subblock[j0:j1].reshape(m, m),
                                  rhs,
                                  cond=cond,
                                  overwrite_a=True,
                                  overwrite_b=True)
            inv_subblock[j0:j1] = np.ravel(gelssoutput[1])

    A.schwarz_parameters = (subdomain, subdomain_ptr, inv_subblock,
                            inv_subblock_ptr)
    return A.schwarz_parameters
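A possible invocation, assuming pyamg's gallery; the second call illustrates that the computed data is cached on the matrix and simply returned:

from pyamg.gallery import poisson

A = poisson((8, 8), format='csr')
subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr = schwarz_parameters(A)

# A second call with no extra arguments returns the cached tuple stored on A
print(schwarz_parameters(A) is A.schwarz_parameters)    # expected output: True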