Example #1
File: iterative.py  Project: 317070/scipy
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M1=None, M2=None, callback=None):
    """Use Quasi-Minimal Residual iteration to solve A x = b

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        It is required that the linear operator can produce
        ``Ax`` and ``A^T x``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0  : {array, matrix}
        Starting guess for the solution.
    tol : float
        Tolerance to achieve. The algorithm terminates when either the relative
        or the absolute residual is below `tol`.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, dense matrix, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, dense matrix, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1*A*M2 should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    xtype : {'f','d','F','D'}
        This parameter is DEPRECATED -- avoid using it.

        The type of the result.  If None, then it will be determined from
        A.dtype.char and b.  If A does not have a typecode method then it
        will compute A.matvec(x0) to get a typecode.   To save the extra
        computation when A does not have a typecode attribute use xtype=0
        for the same type as b or use xtype='f','d','F',or 'D'.
        This parameter has been superseded by LinearOperator.

    See Also
    --------
    LinearOperator

    """
    A_ = A
    A,M,x,b,postprocess = make_system(A,None,x0,b,xtype)

    if M1 is None and M2 is None:
        if hasattr(A_,'psolve'):
            def left_psolve(b):
                return A_.psolve(b,'left')

            def right_psolve(b):
                return A_.psolve(b,'right')

            def left_rpsolve(b):
                return A_.rpsolve(b,'left')

            def right_rpsolve(b):
                return A_.rpsolve(b,'right')
            M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
        else:
            def id(b):
                return b
            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')
    stoptest = getattr(_iterative, ltr + 'stoptest2')

    resid = tol
    ndx1 = 1
    ndx2 = -1
    work = np.zeros(11*n,x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    bnrm2 = -1.0
    iter_ = maxiter
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(work[slice1])
        elif (ijob == 2):
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.rmatvec(work[slice1])
        elif (ijob == 3):
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(x)
        elif (ijob == 8):
            if ftflag:
                info = -1
                ftflag = False
            bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
        ijob = 2

    if info > 0 and iter_ == maxiter and resid > tol:
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
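A minimal, hedged usage sketch for the function above (not part of the original file; it assumes a SciPy version whose qmr still accepts the tol, M1 and M2 keywords shown in this signature):

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import LinearOperator, qmr

# Small nonsymmetric test system.
A = csc_matrix([[4.0, 1.0, 0.0],
                [2.0, 3.0, 1.0],
                [0.0, 1.0, 2.0]])
b = np.array([1.0, 2.0, 3.0])

# Trivial (identity) left/right preconditioners; in practice M1 and M2 would
# approximate factors of the inverse of A so that M1*A*M2 is better
# conditioned than A alone.
identity = lambda v: v
M1 = LinearOperator(A.shape, matvec=identity, rmatvec=identity, dtype=A.dtype)
M2 = LinearOperator(A.shape, matvec=identity, rmatvec=identity, dtype=A.dtype)

x, info = qmr(A, b, M1=M1, M2=M2, tol=1e-10)
print(info)                   # 0 on successful convergence
print(np.allclose(A @ x, b))  # True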
Example #2
Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
                       [1, -2, 1, 0, 5, 0],
                       [0, 1, -2, 1, 0, 0],
                       [0, 0, 1, -2, 1, 0],
                       [0, 3, 0, 1, -2, 1],
                       [1, 0, 0, 0, 1, -2]]))
b = array([1, 2, 3, 4, 5, 6])
count = [0]


def matvec(v):
    count[0] += 1
    return Am*v


A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)


def do_solve(**kw):
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
                          inner_m=6, tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
    return x0, count_0


class TestLGMRES(object):
    def test_preconditioner(self):
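        # Hedged sketch only -- the original test body is truncated here.
        # It exercises do_solve with a preconditioner built from an exact LU
        # factorization of Am and compares the A-matvec counts.
        from scipy.sparse.linalg import splu

        pc = splu(Am.tocsc().astype(float))
        M = LinearOperator(shape=Am.shape, matvec=pc.solve, dtype=float)

        x0, count_plain = do_solve()
        x1, count_prec = do_solve(M=M)
        # A good preconditioner should reduce the number of A-matvecs needed.
        print("matvecs without / with preconditioner:", count_plain, count_prec)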
Example #3
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
        atol=None):
    """Use Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        It is required that the linear operator can produce
        ``Ax`` and ``A^T x``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0  : {array, matrix}
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is ``'legacy'``, which emulates
        a different legacy behavior.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, dense matrix, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, dense matrix, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1*A*M2 should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.

    See Also
    --------
    LinearOperator

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import qmr
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = qmr(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b)

    if M1 is None and M2 is None:
        if hasattr(A_,'psolve'):
            def left_psolve(b):
                return A_.psolve(b,'left')

            def right_psolve(b):
                return A_.psolve(b,'right')

            def left_rpsolve(b):
                return A_.rpsolve(b,'left')

            def right_rpsolve(b):
                return A_.rpsolve(b,'right')
            M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
        else:
            def id(b):
                return b
            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')

    get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
    if atol == 'exit':
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(11*n,x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(work[slice1])
        elif (ijob == 2):
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.rmatvec(work[slice1])
        elif (ijob == 3):
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(x)
        elif (ijob == 8):
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
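As a small, self-contained illustration (not from the SciPy sources) of the stopping rule documented above, norm(residual) <= max(tol*norm(b), atol) can be checked directly:

import numpy as np

def converged(residual, b, tol=1e-5, atol=0.0):
    # The convergence criterion described in the tol/atol documentation above.
    return np.linalg.norm(residual) <= max(tol * np.linalg.norm(b), atol)

b = np.ones(4)                                      # norm(b) = 2
print(converged(1e-6 * np.ones(4), b))              # True:  2e-6 <= 2e-5
print(converged(1e-3 * np.ones(4), b))              # False: 2e-3 >  2e-5
print(converged(1e-3 * np.ones(4), b, atol=1e-2))   # True, via the absolute floor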
Example #4
def make_system(A, M, x0, b, xtype=None):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, None}
        initial guess to iterative method
    b : array_like
        right hand side
    xtype : {'f', 'd', 'F', 'D', None}
        dtype of the x vector

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' %
                         (A.shape, ))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N, )):
        raise ValueError('A and b have incompatible dimensions')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b, matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if xtype is None:
        if hasattr(A, 'dtype'):
            xtype = A.dtype.char
        else:
            xtype = A.matvec(b).dtype.char
        xtype = coerce(xtype, b.dtype.char)
    else:
        warn('Use of xtype argument is deprecated. '\
                'Use LinearOperator( ... , dtype=xtype) instead.',\
                DeprecationWarning)
        if xtype == 0:
            xtype = b.dtype.char
        else:
            if xtype not in 'fdFD':
                raise ValueError("xtype must be 'f', 'd', 'F', or 'D'")

    b = asarray(b, dtype=xtype)  #make b the same type as x
    b = b.ravel()

    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N, )):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()

    # process preconditioner
    if M is None:
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape,
                               matvec=psolve,
                               rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    return A, M, x, b, postprocess
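A hedged usage sketch of the helper above. make_system is internal to SciPy's iterative solvers, so its import path varies between versions; the sketch assumes the function defined above (and its module-level helpers such as aslinearoperator and coerce) is already in scope:

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[2, 1], [1, 3]]))   # integer entries
b = [1, 2]                                   # list input, non-floating dtype

A_op, M, x, b_vec, postprocess = make_system(A, None, None, b)

print(b_vec.dtype)            # float64: non-FP input is upcast to double
print(x)                      # [0. 0.]: zero initial guess of matching dtype
print(postprocess(x).shape)   # (2,): reshaped back to b's original shape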
Example #5
def make_system(A, M, x0, b):

    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError(
            'expected square matrix, but got shape=%s' % (A.shape,))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N,)):
        raise ValueError('A and b have incompatible dimensions')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b, matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if hasattr(A, 'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N,)):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()

    # process preconditioner
    if M is None:
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    return A, M, x, b, postprocess
Example #6
File: ch.py  Project: algrs/chumpy
    def dr_wrt(self, wrt, reverse_mode=False, profiler=None):
        tm_dr_wrt = timer()
        self.called_dr_wrt = True
        self._call_on_changed()

        drs = []

        if wrt in self._cache['drs']:
            if DEBUG:
                if wrt not in self._cache_info:
                    self._cache_info[wrt] = 0
                self._cache_info[wrt] +=1
                self._status = 'cached'
            return self._cache['drs'][wrt]

        direct_dr = self._compute_dr_wrt_sliced(wrt)

        if direct_dr is not None:
            drs.append(direct_dr)    

        if DEBUG:
            self._status = 'pending'
        
        propnames = set(_props_for(self.__class__))
        for k in set(self.dterms).intersection(propnames.union(set(self.__dict__.keys()))):

            p = getattr(self, k)

            if hasattr(p, 'dterms') and p is not wrt:

                indirect_dr = None

                if reverse_mode:
                    lhs = self._compute_dr_wrt_sliced(p)
                    if isinstance(lhs, LinearOperator):
                        tm_dr_wrt.pause()
                        dr2 = p.dr_wrt(wrt)
                        tm_dr_wrt.resume()
                        indirect_dr = lhs.matmat(dr2) if dr2 is not None else None
                    else:
                        indirect_dr = p.lmult_wrt(lhs, wrt)
                else: # forward mode
                    tm_dr_wrt.pause()
                    dr2 = p.dr_wrt(wrt, profiler=profiler)
                    tm_dr_wrt.resume()
                    if dr2 is not None:
                        indirect_dr = self.compute_rop(p, rhs=dr2)

                if indirect_dr is not None:
                    drs.append(indirect_dr)

        if len(drs)==0:
            result = None
        elif len(drs)==1:
            result = drs[0]
        else:
            # TODO: ????????
            # result = np.sum(x for x in drs)
            if not np.any([isinstance(a, LinearOperator) for a in drs]):
                result = reduce(lambda x, y: x+y, drs)
            else:
                result = LinearOperator(drs[0].shape, lambda x : reduce(lambda a, b: a.dot(x)+b.dot(x),drs))

        # TODO: figure out how/whether to do this.
        if result is not None and not sp.issparse(result):
            tm_nonzero = timer()
            nonzero = np.count_nonzero(result)
            if tm_nonzero() > 0.1:
                pif('count_nonzero in {}sec'.format(tm_nonzero()))
            if nonzero == 0 or hasattr(result, 'size') and result.size / float(nonzero) >= 10.0:
                tm_convert_to_sparse = timer()
                result = sp.csc_matrix(result)
                import gc
                gc.collect()
                pif('converting result to sparse in {}sec'.format(tm_convert_to_sparse()))
                        
        if (result is not None) and (not sp.issparse(result)) and (not isinstance(result, LinearOperator)):
            result = np.atleast_2d(result)
        
        # When the number of parents is one, it indicates that
        # caching this is probably not useful because not 
        # more than one parent will likely ask for this same
        # thing again in the same iteration of an optimization.
        #
        # When the number of parents is zero, this is the top
        # level object and should be cached; when it's > 1
        # cache the combinations of the children.
        #
        # If we *always* filled in the cache, it would require 
        # more memory but would occasionally save a little cpu,
        # on average.
        if len(self._parents.keys()) != 1:
            self._cache['drs'][wrt] = result

        if DEBUG:
            self._status = 'done'
        
        if getattr(self, '_make_dense', False) and sp.issparse(result):
            result = result.todense()
        if getattr(self, '_make_sparse', False) and not sp.issparse(result):
            result = sp.csc_matrix(result)
        
        if tm_dr_wrt() > 0.1:
            pif('dx of {} wrt {} in {}sec, sparse: {}'.format(self.short_name, wrt.short_name, tm_dr_wrt(), sp.issparse(result)))

        return result
Example #7
    def dr_wrt(self, wrt, reverse_mode=False, profiler=None):
        tm_dr_wrt = timer()
        self.called_dr_wrt = True
        self._call_on_changed()

        drs = []

        if wrt in self._cache['drs']:
            if DEBUG:
                if wrt not in self._cache_info:
                    self._cache_info[wrt] = 0
                self._cache_info[wrt] += 1
                self._status = 'cached'
            return self._cache['drs'][wrt]

        direct_dr = self._compute_dr_wrt_sliced(wrt)

        if direct_dr is not None:
            drs.append(direct_dr)

        if DEBUG:
            self._status = 'pending'

        propnames = set(_props_for(self.__class__))
        for k in set(self.dterms).intersection(
                propnames.union(set(self.__dict__.keys()))):

            p = getattr(self, k)

            if hasattr(p, 'dterms') and p is not wrt:

                indirect_dr = None

                if reverse_mode:
                    lhs = self._compute_dr_wrt_sliced(p)
                    if isinstance(lhs, LinearOperator):
                        tm_dr_wrt.pause()
                        dr2 = p.dr_wrt(wrt)
                        tm_dr_wrt.resume()
                        indirect_dr = lhs.matmat(dr2) if dr2 is not None else None
                    else:
                        indirect_dr = p.lmult_wrt(lhs, wrt)
                else:  # forward mode
                    tm_dr_wrt.pause()
                    dr2 = p.dr_wrt(wrt, profiler=profiler)
                    tm_dr_wrt.resume()
                    if dr2 is not None:
                        indirect_dr = self.compute_rop(p, rhs=dr2)

                if indirect_dr is not None:
                    drs.append(indirect_dr)

        if len(drs) == 0:
            result = None
        elif len(drs) == 1:
            result = drs[0]
        else:
            # TODO: ????????
            # result = np.sum(x for x in drs)
            if not np.any([isinstance(a, LinearOperator) for a in drs]):
                result = reduce(lambda x, y: x + y, drs)
            else:
                result = LinearOperator(
                    drs[0].shape,
                    lambda x: reduce(lambda a, b: a.dot(x) + b.dot(x), drs))

        # TODO: figure out how/whether to do this.
        if result is not None and not sp.issparse(result):
            tm_nonzero = timer()
            nonzero = np.count_nonzero(result)
            if tm_nonzero() > 0.1:
                pif('count_nonzero in {}sec'.format(tm_nonzero()))
            if nonzero == 0 or hasattr(
                    result, 'size') and result.size / float(nonzero) >= 10.0:
                tm_convert_to_sparse = timer()
                result = sp.csc_matrix(result)
                import gc
                gc.collect()
                pif('converting result to sparse in {}sec'.format(
                    tm_convert_to_sparse()))

        if (result is not None) and (not sp.issparse(result)) and (
                not isinstance(result, LinearOperator)):
            result = np.atleast_2d(result)

        # When the number of parents is one, it indicates that
        # caching this is probably not useful because not
        # more than one parent will likely ask for this same
        # thing again in the same iteration of an optimization.
        #
        # When the number of parents is zero, this is the top
        # level object and should be cached; when it's > 1
        # cache the combinations of the children.
        #
        # If we *always* filled in the cache, it would require
        # more memory but would occasionally save a little cpu,
        # on average.
        if len(list(self._parents.keys())) != 1:
            self._cache['drs'][wrt] = result

        if DEBUG:
            self._status = 'done'

        if getattr(self, '_make_dense', False) and sp.issparse(result):
            result = result.todense()
        if getattr(self, '_make_sparse', False) and not sp.issparse(result):
            result = sp.csc_matrix(result)

        if tm_dr_wrt() > 0.1:
            pif('dx of {} wrt {} in {}sec, sparse: {}'.format(
                self.short_name, wrt.short_name, tm_dr_wrt(),
                sp.issparse(result)))

        return result
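The final else-branch above represents a sum of derivative blocks lazily when any of them is a LinearOperator, instead of materialising the sum as a matrix. A standalone, hedged sketch of that idea (using a plain summation rather than the reduce expression above):

import numpy as np
from scipy.sparse.linalg import LinearOperator, aslinearoperator

# Two derivative blocks: a dense matrix and an operator that is never
# materialised as a matrix.
drs = [np.eye(3), aslinearoperator(2.0 * np.eye(3))]

# Their sum, represented lazily: applying it applies each block in turn.
summed = LinearOperator(drs[0].shape,
                        matvec=lambda v: sum(d.dot(v) for d in drs),
                        dtype=float)

v = np.arange(3.0)
print(summed.matvec(v))   # [0. 3. 6.] == (I + 2*I) @ v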
Example #8
def test_mpi_p2p_alldata_gather():
    N = 2
    comm = MPI.COMM_WORLD
    world_rank = comm.Get_rank()
    world_size = comm.Get_size()
    print(comm)
    print("world rank ", world_rank)

    group = comm.Get_group()
    newgroup = group.Excl([0])
    newcomm = comm.Create(newgroup)

    # PROCESS 0:
    # Receive fenics_mesh from PROCESS 1 (fenics mesh distributed
    # across PROCESS 1 and PROCESS 2.

    if world_rank == 0:
        assert newcomm == MPI.COMM_NULL
        info = MPI.Status()

        # BM_CELLS
        comm.Probe(MPI.ANY_SOURCE, 100, info)
        elements = info.Get_elements(MPI.LONG)
        bm_cells = np.zeros(elements, dtype=np.int64)
        comm.Recv([bm_cells, MPI.LONG], source=1, tag=100)
        bm_cells = bm_cells.reshape(int(elements / 3), 3)

        # BM_COORDS
        comm.Probe(MPI.ANY_SOURCE, 101, info)
        elements = info.Get_elements(MPI.DOUBLE)
        bm_coords = np.zeros(elements, dtype=np.float64)
        comm.Recv([bm_coords, MPI.DOUBLE], source=1, tag=101)
        bm_coords = bm_coords.reshape(int(elements / 3), 3)

        # BM_NODES
        comm.Probe(MPI.ANY_SOURCE, 102, info)
        elements = info.Get_elements(MPI.INT)
        bm_nodes = np.zeros(elements, dtype=np.int32)
        comm.Recv([bm_nodes, MPI.INT], source=1, tag=102)
        # print(bm_nodes)
        # print(bm_coords)
        # print(bm_cells)
        # use boundary coords and boundary triangles to create bempp mesh.

        # BM_DOFMAP

        num_fenics_vertices = comm.recv(source=1, tag=103)
        print("num_vertices ", num_fenics_vertices)

        print("length of bm_nodes ", len(bm_nodes))

        # b_vertices_from_vertices = coo_matrix(
        #     (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),
        #     shape=(len(bm_nodes), num_fenics_vertices),
        #     dtype="float64",
        # ).tocsc()

        dof_to_vertex_map = np.zeros(27, dtype=np.int64)

        b_vertices_from_vertices = coo_matrix(
            (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),
            shape=(len(bm_nodes), 27),
            dtype="float64",
        ).tocsc()

        dof_to_vertex_map = np.arange(27, dtype=np.int64)

        print(dof_to_vertex_map)

        vertices_from_fenics_dofs = coo_matrix(
            (
                np.ones(27),
                (dof_to_vertex_map, np.arange(27)),
            ),
            shape=(27, 27),
            dtype="float64",
        ).tocsc()

        # receive A from fenics processes.

        comm.Probe(MPI.ANY_SOURCE, 112, info)
        elements = info.Get_elements(MPI.DOUBLE_COMPLEX)
        av = np.zeros(elements, dtype=np.cdouble)
        comm.Recv([av, MPI.DOUBLE_COMPLEX], source=1, tag=112)
        # print(av)

        comm.Probe(MPI.ANY_SOURCE, 111, info)
        elements = info.Get_elements(MPI.INT)
        aj = np.zeros(elements, dtype=np.int32)
        comm.Recv([aj, MPI.INT], source=1, tag=111)
        # print(aj)
        comm.Probe(MPI.ANY_SOURCE, 110, info)
        elements = info.Get_elements(MPI.INT)
        ai = np.zeros(elements, dtype=np.int32)
        comm.Recv([ai, MPI.INT], source=1, tag=110)
        # print("ai shape ", ai)

        k = 2

        bempp_boundary_grid = bempp.api.Grid(bm_coords.transpose(),
                                             bm_cells.transpose())
        space = bempp.api.function_space(bempp_boundary_grid, "P", 1)
        trace_space = space
        trace_matrix = b_vertices_from_vertices @ vertices_from_fenics_dofs
        bempp_space = bempp.api.function_space(trace_space.grid, "DP", 0)

        id_op = bempp.api.operators.boundary.sparse.identity(
            trace_space, bempp_space, bempp_space)
        mass = bempp.api.operators.boundary.sparse.identity(
            bempp_space, bempp_space, trace_space)
        dlp = bempp.api.operators.boundary.helmholtz.double_layer(
            trace_space, bempp_space, bempp_space, k)
        slp = bempp.api.operators.boundary.helmholtz.single_layer(
            bempp_space, bempp_space, bempp_space, k)

        rhs_fem = np.zeros(27)

        print("length of rhs fem ", len(rhs_fem))

        @bempp.api.complex_callable
        def u_inc(x, n, domain_index, result):
            result[0] = np.exp(1j * k * x[0])

        u_inc = bempp.api.GridFunction(bempp_space, fun=u_inc)
        rhs_bem = u_inc.projections(bempp_space)

        rhs = np.concatenate([rhs_fem, rhs_bem])

        from bempp.api.assembly.blocked_operator import BlockedDiscreteOperator
        from scipy.sparse.linalg.interface import LinearOperator

        blocks = [[None, None], [None, None]]

        trace_op = LinearOperator(trace_matrix.shape,
                                  lambda x: trace_matrix @ x)

        Asp = csr_matrix((av, aj, ai))

        blocks[0][0] = Asp
        blocks[0][1] = -trace_matrix.T * mass.weak_form().A
        blocks[1][0] = (0.5 * id_op - dlp).weak_form() * trace_op
        blocks[1][1] = slp.weak_form()

        blocked = BlockedDiscreteOperator(np.array(blocks))

        from scipy.sparse.linalg import gmres

        c = Counter()
        soln, info = gmres(blocked, rhs, callback=c.add)

        print("Solved in", c.count, "iterations")
        # computed = soln[: fenics_space.dim]

        print(soln)

        # print(actual)
        # print("L2 error:", np.linalg.norm(actual_vec - computed))
        # assert np.linalg.norm(actual_vec - computed) < 1 / N

        # dof_to_vertex_map = np.zeros(num_fenics_vertices, dtype=np.int64)
        # tets = fenics_mesh.geometry.dofmap
        # for tet in range(tets.num_nodes):
        #     cell_dofs = fenics_space.dofmap.cell_dofs(tet)
        #     cell_verts = tets.links(tet)
        #     for v in range(4):
        #         vertex_n = cell_verts[v]
        #         dof = cell_dofs[fenics_space.dofmap.dof_layout.entity_dofs(0, v)[0]]
        #         dof_to_vertex_map[dof] = vertex_n
        # print("dof_to_vertex_map ", dof_to_vertex_map)
        # vertices_from_fenics_dofs = coo_matrix(
        #     (
        #         np.ones(num_fenics_vertices),
        #         (dof_to_vertex_map, np.arange(num_fenics_vertices)),
        #     ),
        #     shape=(num_fenics_vertices, num_fenics_vertices),
        #     dtype="float64",
        # ).tocsc()

        # tets = fenics_mesh.geometry.dofmap
        # for tet in range(tets.num_nodes):
        #     cell_dofs = fenics_space.dofmap.cell_dofs(tet)
        #     cell_verts = tets.links(tet)
        #     for v in range(4):
        #         vertex_n = cell_verts[v]
        #         dof = cell_dofs[fenics_space.dofmap.dof_layout.entity_dofs(0, v)[0]]
        #         dof_to_vertex_map[dof] = vertex_n
        # print(dof_to_vertex_map)
        # vertices_from_fenics_dofs = coo_matrix(
        #     (
        #         np.ones(num_fenics_vertices),
        #         (dof_to_vertex_map, np.arange(num_fenics_vertices)),
        #     ),
        #     shape=(num_fenics_vertices, num_fenics_vertices),
        #     dtype="float64",
        # ).tocsc()

        # # Get trace matrix by multiplication
        # trace_matrix = b_vertices_from_vertices @ vertices_from_fenics_dofs

        # # Now return everything
        # return space, trace_matrix

        # out = os.path.join("./bempp_out", "test_mesh.msh")
        # bempp.api.export(out, grid=bempp_boundary_grid)
        # print("exported mesh to", out)

    else:  # world rank = 1, 2
        fenics_mesh = dolfinx.UnitCubeMesh(newcomm, N, N, N)
        with XDMFFile(newcomm, "box.xdmf", "w") as file:
            file.write_mesh(fenics_mesh)

        fenics_space = dolfinx.FunctionSpace(fenics_mesh, ("CG", 1))

        u = ufl.TrialFunction(fenics_space)
        v = ufl.TestFunction(fenics_space)
        k = 2

        form = (ufl.inner(ufl.grad(u), ufl.grad(v)) -
                k**2 * ufl.inner(u, v)) * ufl.dx

        bm_nodes_global, bm_coords, boundary = bm_from_fenics_mesh_mpi(
            fenics_mesh, fenics_space)
        A = dolfinx.fem.assemble_matrix(form)
        A.assemble()
        ai, aj, av = A.getValuesCSR()
        Asp = csr_matrix((av, aj, ai))
        print(Asp)
        # Asp_array = Asp.toarray()
        # Asp_1 = csr_matrix(Asp_array)
        # assert Asp_1.all() == Asp.all()
        # print(Asp_1)
        # print(Asp)

        bm_nodes_global_list = list(bm_nodes_global)
        bm_nodes_arr = np.asarray(bm_nodes_global_list, dtype=np.int64)
        sendbuf_bdry = boundary
        sendbuf_coords = bm_coords
        sendbuf_nodes = bm_nodes_arr
        recvbuf_boundary = None
        recvbuf_coords = None
        recvbuf_nodes = None

        rank = newcomm.Get_rank()
        # number cols = total num rows?
        print("PRINT counts ")
        print("ai, {}\n aj, {}\n av {}\n ".format(ai.shape, aj.shape,
                                                  av.shape))
        # print("sendbuf_bdry", len(sendbuf_bdry), rank)
        # print("bm_coords ", len(sendbuf_coords), rank)
        # print("nodes ", len(sendbuf_nodes), rank)
        # print("Asp array ", Asp_array.shape)
        # print("Asp ", Asp.shape)
        # print("av ", av)
        # print("av ", av[0])

        # print("ai ", len(ai))
        print("ai \n", ai)
        print("aj \n", aj)
        print("av \n", av)

        # send A
        sendbuf_ai = ai
        sendbuf_aj = aj
        sendbuf_av = av
        root = 0
        sendcounts = np.array(newcomm.gather(len(sendbuf_av), root))
        sendcounts_ai = np.array(newcomm.gather(len(sendbuf_ai), root))
        print(aj)
        # print(sendcounts)

        if newcomm.rank == root:
            print("sendcounts: {}, total: {}".format(sendcounts,
                                                     sum(sendcounts)))
            recvbuf_av = np.empty(sum(sendcounts), dtype=np.cdouble)
            recvbuf_aj = np.empty(sum(sendcounts), dtype=np.int32)
            recvbuf_ai = np.empty(sum(sendcounts_ai), dtype=np.int32)
        else:
            recvbuf_av = None
            recvbuf_aj = None
            recvbuf_ai = None

        # Allocate memory for gathered data on subprocess 0.
        if newcomm.rank == 0:
            info = MPI.Status()
            # The 3 factor corresponds to fact that the array is concatenated
            recvbuf_boundary = np.empty(newcomm.size * len(boundary) * 3,
                                        dtype=np.int32)
            recvbuf_coords = np.empty(newcomm.size * len(bm_coords) * 3,
                                      dtype=np.float64)
            recvbuf_nodes = np.empty(newcomm.size * len(bm_nodes_arr),
                                     dtype=np.int64)
            # recvbuf_dofs = np.empty(newcomm.size * len(bm_dofs))
            # recvbuf_soln = np.empty(newcomm.size*

        # newcomm.Gather(sendbuf_av, recvbuf_av, root=0)
        newcomm.Gatherv(sendbuf_ai,
                        recvbuf=(recvbuf_ai, sendcounts_ai),
                        root=0)
        newcomm.Gatherv(sendbuf=sendbuf_av,
                        recvbuf=(recvbuf_av, sendcounts),
                        root=root)
        newcomm.Gatherv(sendbuf=sendbuf_aj,
                        recvbuf=(recvbuf_aj, sendcounts),
                        root=root)

        # Receive on subprocess 0.
        newcomm.Gather(sendbuf_bdry, recvbuf_boundary, root=0)
        newcomm.Gather(sendbuf_coords, recvbuf_coords, root=0)
        newcomm.Gather(sendbuf_nodes, recvbuf_nodes, root=0)

        # exit(0)
        # this needs to be done - but not essential
        FEniCS_dofs_to_vertices(newcomm, fenics_space, fenics_mesh)
        # print(fenics_space.dim)
        print(fenics_space.dofmap.index_map.global_indices(False))
        print(len(fenics_space.dofmap.index_map.global_indices(False)))
        actual = dolfinx.Function(fenics_space)
        print("actual ", actual)
        actual.interpolate(lambda x: np.exp(1j * k * x[0]))
        actual_vec = actual.vector[:]
        print("actual vec \n ", actual_vec)
        print("actual vec size\n ", actual_vec.size)

        # newcomm.Gather(actual_vec, recvbuf_
        # when we do the gather we get boundary node indices repetitions
        # therefore we find unique nodes in the gathered array.
        if newcomm.rank == 0:
            all_boundary = recvbuf_boundary.reshape(
                int(len(recvbuf_boundary) / 3), 3)  # 48 (48)
            bm_coords = recvbuf_coords.reshape(int(len(recvbuf_coords) / 3),
                                               3)  # 34 (26)
            bm_nodes = recvbuf_nodes  # 34 (26)
            # print(len(bm_nodes))
            # print(len(all_boundary))
            # print(len(bm_coords))

            # Sort the nodes (on global geom node indices) to make the unique faster?
            sorted_indices = recvbuf_nodes.argsort()
            bm_nodes_sorted = recvbuf_nodes[sorted_indices]
            bm_coords_sorted = bm_coords[sorted_indices]
            # print("sorted indices, ", sorted_indices)

            bm_nodes, unique = np.unique(bm_nodes_sorted, return_index=True)
            bm_coords = bm_coords_sorted[unique]
            bm_nodes_list = list(bm_nodes)
            # print("bm_nodes_list", bm_nodes_list)
            # bm_cells - remap boundary triangle indices between 0-len(bm_nodes) - this can be improved
            bm_cells = np.array([[bm_nodes_list.index(i) for i in tri]
                                 for tri in all_boundary])

            #             print("received ai ", recvbuf_ai)
            #             print("received aj ", recvbuf_aj)
            #             print("received av ", recvbuf_av)

            #             # now process ai, aj and av.
            #             print("sendcounts ", sendcounts)
            #             print("sendcounts_ai", sendcounts_ai)

            end = sendcounts_ai[0]
            print("end ", end)
            new_recvbuf_ai = np.delete(recvbuf_ai, end)
            new_recvbuf_ai[end:] += new_recvbuf_ai[end - 1]
            print(new_recvbuf_ai)

            # print(len(bm_nodes))
            # print(len(bm_cells))
            # print(len(bm_coords))
            # print(len(all_boundary))
            # print(bm_cells)
            # send to world process 0.
            comm.Send([bm_cells, MPI.LONG], dest=0, tag=100)
            comm.Send([bm_coords, MPI.DOUBLE], dest=0, tag=101)
            comm.Send([np.array(bm_nodes, np.int32), MPI.INT],
                      dest=0,
                      tag=102)

            # send ai, aj, av

            num_fenics_vertices = fenics_mesh.topology.connectivity(
                0, 0).num_nodes

            comm.send(num_fenics_vertices, dest=0, tag=103)

            comm.Send([new_recvbuf_ai, MPI.INT], dest=0, tag=110)
            print("aj ", recvbuf_aj.shape)
            print("aj ", new_recvbuf_ai.shape)
            comm.Send([recvbuf_aj, MPI.INT], dest=0, tag=111)
            comm.Send([recvbuf_av, MPI.DOUBLE_COMPLEX], dest=0, tag=112)

            print("num_fenics_vertices ", num_fenics_vertices)
Example #9
rhs = np.concatenate([rhs_fem, rhs_bem])

# We are now ready to create a ``BlockedLinearOperator`` containing all four parts of the discretisation of
# $$
# \begin{bmatrix}
#     \mathsf{A}-k^2 \mathsf{M} & -\mathsf{M}_\Gamma\\
#     \tfrac{1}{2}\mathsf{Id}-\mathsf{K} & \mathsf{V}
# \end{bmatrix}.
# $$

# In[8]:

from scipy.sparse.linalg.interface import LinearOperator
blocks = [[None, None], [None, None]]

trace_op = LinearOperator(trace_matrix.shape, lambda x: trace_matrix * x)

blfA = ngs.BilinearForm(ng_space)
blfA += ngs.SymbolicBFI(ngs.grad(u) * ngs.grad(v) - k**2 * n**2 * u * v)

c = ngs.Preconditioner(blfA, type="direct")

blfA.Assemble()

blocks[0][0] = NgOperator(blfA)
blocks[0][1] = -trace_matrix.T * mass.weak_form().sparse_operator
blocks[1][0] = (.5 * id_op - dlp).weak_form() * trace_op
blocks[1][1] = slp.weak_form()

blocked = bempp.api.BlockedDiscreteOperator(np.array(blocks))
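For readers without bempp/NGSolve at hand, the action of such a 2x2 blocked operator on a concatenated vector can be sketched with plain SciPy objects (an illustration of the idea only, with made-up stand-in blocks, not BlockedDiscreteOperator itself):

import numpy as np
from scipy.sparse.linalg import LinearOperator, gmres

n0, n1 = 3, 2
A00 = np.diag([2.0, 3.0, 4.0])   # stands in for A - k^2 M
A01 = np.ones((n0, n1))          # stands in for -M_Gamma
A10 = np.zeros((n1, n0))         # stands in for (1/2) Id - K
A11 = np.diag([5.0, 6.0])        # stands in for V

def block_matvec(v):
    v0, v1 = v[:n0], v[n0:]
    return np.concatenate([A00 @ v0 + A01 @ v1,
                           A10 @ v0 + A11 @ v1])

blocked_sketch = LinearOperator((n0 + n1, n0 + n1), matvec=block_matvec, dtype=float)
soln, info = gmres(blocked_sketch, np.ones(n0 + n1))
print(info)   # 0 on convergence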
Example #10
def qmr(A,
        b,
        x0=None,
        tol=1e-5,
        maxiter=None,
        M1=None,
        M2=None,
        callback=None,
        atol=None):
    """Use Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        It is required that the linear operator can produce
        ``Ax`` and ``A^T x``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0  : {array, matrix}
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is ``'legacy'``, which emulates
        a different legacy behavior.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, dense matrix, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, dense matrix, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1*A*M2 should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.

    See Also
    --------
    LinearOperator

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import qmr
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = qmr(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b)

    if M1 is None and M2 is None:
        if hasattr(A_, 'psolve'):

            def left_psolve(b):
                return A_.psolve(b, 'left')

            def right_psolve(b):
                return A_.psolve(b, 'right')

            def left_rpsolve(b):
                return A_.rpsolve(b, 'left')

            def right_rpsolve(b):
                return A_.rpsolve(b, 'right')

            M1 = LinearOperator(A.shape,
                                matvec=left_psolve,
                                rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape,
                                matvec=right_psolve,
                                rmatvec=right_rpsolve)
        else:

            def id(b):
                return b

            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n * 10

    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')

    get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
    if atol == 'exit':
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(11 * n, x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        slice1 = slice(ndx1 - 1, ndx1 - 1 + n)
        slice2 = slice(ndx2 - 1, ndx2 - 1 + n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(work[slice1])
        elif (ijob == 2):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.rmatvec(work[slice1])
        elif (ijob == 3):
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(x)
        elif (ijob == 8):
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
Example #11
def qmr(A,
        b,
        x0=None,
        tol=1e-5,
        maxiter=None,
        xtype=None,
        M1=None,
        M2=None,
        callback=None):
    """Use Quasi-Minimal Residual iteration to solve A x = b

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        It is required that the linear operator can produce
        ``Ax`` and ``A^T x``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0  : {array, matrix}
        Starting guess for the solution.
    tol : float
        Tolerance to achieve. The algorithm terminates when either the relative
        or the absolute residual is below `tol`.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, dense matrix, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, dense matrix, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1*A*M2 should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    xtype : {'f','d','F','D'}
        This parameter is DEPRECATED -- avoid using it.

        The type of the result.  If None, then it will be determined from
        A.dtype.char and b.  If A does not have a typecode method then it
        will compute A.matvec(x0) to get a typecode.   To save the extra
        computation when A does not have a typecode attribute use xtype=0
        for the same type as b or use xtype='f','d','F',or 'D'.
        This parameter has been superseded by LinearOperator.

    See Also
    --------
    LinearOperator

    """
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b, xtype)

    if M1 is None and M2 is None:
        if hasattr(A_, 'psolve'):

            def left_psolve(b):
                return A_.psolve(b, 'left')

            def right_psolve(b):
                return A_.psolve(b, 'right')

            def left_rpsolve(b):
                return A_.rpsolve(b, 'left')

            def right_rpsolve(b):
                return A_.rpsolve(b, 'right')

            M1 = LinearOperator(A.shape,
                                matvec=left_psolve,
                                rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape,
                                matvec=right_psolve,
                                rmatvec=right_rpsolve)
        else:

            def id(b):
                return b

            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n * 10

    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')
    stoptest = getattr(_iterative, ltr + 'stoptest2')

    resid = tol
    ndx1 = 1
    ndx2 = -1
    work = np.zeros(11 * n, x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    bnrm2 = -1.0
    iter_ = maxiter
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        slice1 = slice(ndx1 - 1, ndx1 - 1 + n)
        slice2 = slice(ndx2 - 1, ndx2 - 1 + n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(work[slice1])
        elif (ijob == 2):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.rmatvec(work[slice1])
        elif (ijob == 3):
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(x)
        elif (ijob == 8):
            if ftflag:
                info = -1
                ftflag = False
            bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
        ijob = 2

    if info > 0 and iter_ == maxiter and resid > tol:
        #info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
Example #12
print("assembling the boundary operators")

##set up the bem
sl = bempp.api.operators.boundary.laplace.single_layer(bem_dc, bem_c, bem_dc)
dl = bempp.api.operators.boundary.laplace.double_layer(bem_c, bem_c, bem_dc)
id_op = bempp.api.operators.boundary.sparse.identity(bem_dc, bem_dc, bem_c)
id_op2 = bempp.api.operators.boundary.sparse.identity(bem_c, bem_c, bem_dc)

block = np.ndarray([2, 2], dtype=object)
block[0, 0] = ngbem.NgOperator(a)
block[0, 1] = -trace_matrix.T * id_op.weak_form().sparse_operator

from scipy.sparse.linalg.interface import LinearOperator

trace_op = LinearOperator(trace_matrix.shape, lambda x: trace_matrix * x)
rhs_op1 = 0.5 * id_op2 - dl
block[1, 0] = rhs_op1.weak_form() * trace_op
block[1, 1] = sl.weak_form()
blockOp = bempp.api.BlockedDiscreteOperator(block)

#set up a block-diagonal preconditioner
p_block = np.ndarray([2, 2], dtype=object)
p_block[0, 0] = ngbem.NgOperator(c, a)
p_block[1, 1] = bempp.api.InverseSparseDiscreteBoundaryOperator(
    bempp.api.operators.boundary.sparse.identity(
        bem_dc, bem_dc, bem_dc).weak_form())  # np.identity(bem_dc.global_dof_count)

p_blockOp = bempp.api.BlockedDiscreteOperator(p_block)
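A SciPy-only sketch of the block-diagonal preconditioner idea above: each diagonal block's (approximate) inverse is wrapped as an operator and applied to its own slice of the vector. The block contents and sizes here are illustrative stand-ins, not the bempp/NGSolve objects used above:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import LinearOperator, factorized

n0, n1 = 3, 2
B0 = csc_matrix(np.diag([2.0, 3.0, 4.0]))   # stands in for the FEM block
B1 = csc_matrix(np.diag([5.0, 6.0]))        # stands in for the BEM identity block

solve0, solve1 = factorized(B0), factorized(B1)

def precondition(v):
    # Apply each block's inverse to its own part of the concatenated vector.
    return np.concatenate([solve0(v[:n0]), solve1(v[n0:])])

p_block_sketch = LinearOperator((n0 + n1, n0 + n1), matvec=precondition, dtype=float)
# Such an operator is then passed to a Krylov solver as the preconditioner,
# e.g. gmres(blockOp, rhs, M=p_block_sketch).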
Example #13
    def as_linear_operator(self):
        from scipy.sparse.linalg.interface import LinearOperator
        return LinearOperator((self.num_pixels, self.num_coefs),
                              matvec=self.matvec,
                              rmatvec=self.rmatvec,
                              dtype='float')
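A hedged usage note with illustrative names only: an operator built this way from a matvec/rmatvec pair plugs directly into SciPy's matrix-free solvers, for example a least-squares fit of coefficients to pixels:

import numpy as np
from scipy.sparse.linalg import LinearOperator, lsqr

mat = np.array([[1.0, 2.0],
                [3.0, 4.0],
                [5.0, 6.0]])   # num_pixels x num_coefs stand-in

op = LinearOperator(mat.shape,
                    matvec=lambda v: mat @ v,
                    rmatvec=lambda v: mat.T @ v,
                    dtype='float')

coefs = lsqr(op, np.array([1.0, 2.0, 3.0]))[0]
print(np.allclose(mat @ coefs, [1.0, 2.0, 3.0]))   # True: consistent system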
Example #14
File: utils.py  Project: szkafander/scipy
def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, str, None}
        initial guess to iterative method.
        ``x0 = 'Mb'`` means using the nonzero initial guess ``M * b``.
        Default is `None`, which means using the zero initial guess.
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' %
                         (A.shape, ))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N, )):
        raise ValueError('shapes of A {} and b {} are incompatible'.format(
            A.shape, b.shape))

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b, matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if hasattr(A, 'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    # process preconditioner
    if M is None:
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape,
                               matvec=psolve,
                               rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    # set initial guess
    if x0 is None:
        x = zeros(N, dtype=xtype)
    elif isinstance(x0, str):
        if x0 == 'Mb':  # use nonzero initial guess ``M * b``
            bCopy = b.copy()
            x = M.matvec(bCopy)
        else:
            raise ValueError("unknown initial-guess string %r; only 'Mb' is supported" % x0)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N, )):
            raise ValueError(f'shapes of A {A.shape} and '
                             f'x0 {x.shape} are incompatible')
        x = x.ravel()

    return A, M, x, b, postprocess
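A hedged sketch of the x0='Mb' branch documented above, assuming the function defined here (and its module-level helpers) is in scope:

import numpy as np
from scipy.sparse.linalg import aslinearoperator

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
M = aslinearoperator(np.diag(1.0 / np.diag(A)))   # Jacobi preconditioner

A_op, M_op, x, b_vec, postprocess = make_system(A, M, 'Mb', b)
print(np.allclose(x, M.matvec(b)))   # True: the initial guess is M*b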