Example #1
File: ch.py Project: pujades/chumpy
    def _superdot(self, lhs, rhs):
        try:
            if lhs is None:
                return None
            if rhs is None:
                return None
            
            if isinstance(lhs, np.ndarray) and lhs.size==1:
                lhs = lhs.ravel()[0]
                
            if isinstance(rhs, np.ndarray) and rhs.size==1:
                rhs = rhs.ravel()[0]
    
            if isinstance(lhs, numbers.Number) or isinstance(rhs, numbers.Number):
                return lhs * rhs

            if isinstance(rhs, LinearOperator):
                return LinearOperator((lhs.shape[0], rhs.shape[1]), lambda x : lhs.dot(rhs.dot(x)))

            if isinstance(lhs, LinearOperator):                
                if sp.issparse(rhs):
                    return LinearOperator((lhs.shape[0], rhs.shape[1]), lambda x : lhs.dot(rhs.dot(x)))
                else:
                    return lhs.dot(rhs)
            
            # TODO: Figure out how/whether to do this.
            #lhs, rhs = utils.convert_inputs_to_sparse_if_possible(lhs, rhs)

            if not sp.issparse(lhs) and sp.issparse(rhs):
                return rhs.T.dot(lhs.T).T
    
            return lhs.dot(rhs)
        except:
            import pdb; pdb.set_trace()
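The branches of _superdot above all rely on one trick: when either operand is lazy, the product is represented as a new LinearOperator whose matvec chains the two dot products instead of materialising the product matrix. A minimal, self-contained sketch of that composition pattern (the shapes and random operands below are assumptions for illustration):

import numpy as np
from scipy.sparse.linalg import LinearOperator, aslinearoperator

# Compose a lazy operator with a dense matrix without forming lhs @ rhs.
lhs = aslinearoperator(np.random.rand(4, 6))
rhs = np.random.rand(6, 3)

prod = LinearOperator((lhs.shape[0], rhs.shape[1]),
                      matvec=lambda x: lhs.dot(rhs.dot(x)))

x = np.random.rand(3)
print(np.allclose(prod.matvec(x), lhs.dot(rhs.dot(x))))  # same action, product never materialised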
Example #2
    def test_leftright_precond(self):
        """Check that QMR works with left and right preconditioners"""

        from scipy.sparse.linalg.dsolve import splu
        from scipy.sparse.linalg.interface import LinearOperator

        n = 100

        dat = ones(n)
        A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1] ,n,n)
        b = arange(n,dtype='d')

        L = spdiags([-dat/2, dat], [-1,0], n, n)
        U = spdiags([4*dat, -dat], [ 0,1], n, n)

        L_solver = splu(L)
        U_solver = splu(U)

        def L_solve(b):
            return L_solver.solve(b)
        def U_solve(b):
            return U_solver.solve(b)
        def LT_solve(b):
            return L_solver.solve(b,'T')
        def UT_solve(b):
            return U_solver.solve(b,'T')

        M1 = LinearOperator( (n,n), matvec=L_solve, rmatvec=LT_solve )
        M2 = LinearOperator( (n,n), matvec=U_solve, rmatvec=UT_solve )

        x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

        assert_equal(info,0)
        assert( norm(b - A*x) < 1e-8*norm(b) )
Example #3
    def _superdot(self, lhs, rhs, profiler=None):

        try:
            if lhs is None:
                return None
            if rhs is None:
                return None

            if isinstance(lhs, np.ndarray) and lhs.size == 1:
                lhs = lhs.ravel()[0]

            if isinstance(rhs, np.ndarray) and rhs.size == 1:
                rhs = rhs.ravel()[0]

            if isinstance(lhs, numbers.Number) or isinstance(
                    rhs, numbers.Number):
                return lhs * rhs

            if isinstance(rhs, LinearOperator):
                return LinearOperator((lhs.shape[0], rhs.shape[1]),
                                      lambda x: lhs.dot(rhs.dot(x)))

            if isinstance(lhs, LinearOperator):
                if sp.issparse(rhs):
                    return LinearOperator((lhs.shape[0], rhs.shape[1]),
                                          lambda x: lhs.dot(rhs.dot(x)))
                else:
                    # TODO: ?????????????
                    # return lhs.matmat(rhs)
                    return lhs.dot(rhs)

            # TODO: Figure out how/whether to do this.
            tm_maybe_sparse = timer()
            lhs, rhs = utils.convert_inputs_to_sparse_if_necessary(lhs, rhs)
            if tm_maybe_sparse() > 0.1:
                pif('convert_inputs_to_sparse_if_necessary in {}sec'.format(
                    tm_maybe_sparse()))

            if not sp.issparse(lhs) and sp.issparse(rhs):
                return rhs.T.dot(lhs.T).T
            return lhs.dot(rhs)
        except Exception as e:
            import sys, traceback
            traceback.print_exc(file=sys.stdout)
            if DEBUG:
                import pdb
                pdb.post_mortem()
            else:
                raise
Example #4
def svds(A, k=6, ncv=None, tol=0):
    """Compute k singular values/vectors for a sparse matrix using ARPACK.

    Parameters
    ----------
    A : sparse matrix
        Array to compute the SVD on
    k : int, optional
        Number of singular values and vectors to compute.
    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k+1 and smaller than n;
        it is recommended that ncv > 2*k
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.

    Note
    ----
    This is a naive implementation using an eigensolver on A.H * A or
    A * A.H, depending on which one is more efficient.

    """
    if not (isinstance(A, np.ndarray) or isspmatrix(A)):
        A = np.asarray(A)

    n, m = A.shape

    if np.issubdtype(A.dtype, np.complexfloating):
        herm = lambda x: x.T.conjugate()
        eigensolver = eigs
    else:
        herm = lambda x: x.T
        eigensolver = eigsh

    if n > m:
        X = A
        XH = herm(A)
    else:
        XH = A
        X = herm(A)

    def matvec_XH_X(x):
        return XH.dot(X.dot(x))

    XH_X = LinearOperator(matvec=matvec_XH_X,
                          dtype=X.dtype,
                          shape=(X.shape[1], X.shape[1]))

    eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol**2)
    s = np.sqrt(eigvals)

    if n > m:
        v = eigvec
        u = X.dot(v) / s
        vh = herm(v)
    else:
        u = eigvec
        vh = herm(X.dot(u) / s)

    return u, s, vh
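A hedged usage sketch for the naive svds above; the test matrix and k=2 are illustrative assumptions, and it presumes the function and its module-level imports (np, eigsh/eigs, isspmatrix, LinearOperator) are in scope.

import numpy as np
from scipy.sparse import csc_matrix

A = csc_matrix([[1., 0., 0.],
                [5., 0., 2.],
                [0., -1., 0.],
                [0., 0., 3.]])
u, s, vh = svds(A, k=2)  # the naive implementation defined above

# Sanity check against the two largest singular values of a dense SVD.
print(np.sort(s))
print(np.sort(np.linalg.svd(A.toarray(), compute_uv=False))[-2:])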
Example #5
def bench_lobpcg_mikota():
    print()
    print('                 lobpcg benchmark using mikota pairs')
    print('==============================================================')
    print('      shape      | blocksize |    operation   |   time   ')
    print('                                              | (seconds)')
    print('--------------------------------------------------------------')
    fmt = ' %15s |   %3d     |     %6s     | %6.2f '

    m = 10
    for n in 128, 256, 512, 1024, 2048:
        shape = (n, n)
        A, B = _mikota_pair(n)
        desired_evs = np.square(np.arange(1, m + 1))

        tt = time.clock()
        X = rand(n, m)
        X = orth(X)
        LorU, lower = cho_factor(A, lower=0, overwrite_a=0)
        M = LinearOperator(shape,
                           matvec=partial(_precond, LorU, lower),
                           matmat=partial(_precond, LorU, lower))
        eigs, vecs = lobpcg(A, X, B, M, tol=1e-4, maxiter=40)
        eigs = sorted(eigs)
        elapsed = time.clock() - tt
        assert_allclose(eigs, desired_evs)
        print(fmt % (shape, m, 'lobpcg', elapsed))

        tt = time.clock()
        w = eigh(A, B, eigvals_only=True, eigvals=(0, m - 1))
        elapsed = time.clock() - tt
        assert_allclose(w, desired_evs)
        print(fmt % (shape, m, 'eigh', elapsed))
Example #6
    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)

        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)

        assert_(count_1 == 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))
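The test above relies on module-level fixtures (Am, A, do_solve). A self-contained sketch of the same splu-as-preconditioner pattern, with an assumed 1-D Laplacian and gmres standing in for do_solve:

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import splu, gmres, LinearOperator

n = 100
dat = np.ones(n)
A = spdiags([-dat, 2 * dat, -dat], [-1, 0, 1], n, n).tocsc()
b = np.arange(n, dtype=float)

pc = splu(A)  # exact factorisation used as the preconditioner
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)

residuals = []
x, info = gmres(A, b, M=M, callback=residuals.append)
print(info, len(residuals))  # with an exact preconditioner, very few iterations are needed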
Example #7
    def time_mikota(self, n, solver):
        m = 10
        if solver == 'lobpcg':
            X = rand(n, m)
            X = orth(X)
            LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
            M = LinearOperator(self.shape,
                               matvec=partial(_precond, LorU, lower),
                               matmat=partial(_precond, LorU, lower))
            eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
        else:
            eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m - 1))
Example #8
def compute_rank_approx(sz, routes):
    A, b, N, block_sizes, x_true = util.load_data(
        str(sz) + "/experiment2_waypoints_matrices_routes_" + str(routes) +
        ".mat")

    def matvec_XH_X(x):
        return A.dot(A.T.dot(x))

    XH_X = LinearOperator(matvec=matvec_XH_X,
                          dtype=A.dtype,
                          shape=(A.shape[0], A.shape[0]))
    eigvals, eigvec = eigsh(XH_X, k=500, tol=10**-5)
    eigvals = eigvals[::-1]
    for i, val in enumerate(eigvals):
        if val < 10**-6:
            return (N.shape[1], i)
Example #9
    def compute_dr_wrt(self, wrt):
        if wrt is self.a:
            if False:
                from scipy.sparse.linalg.interface import LinearOperator
                return LinearOperator((self.size, wrt.size), lambda x: self.reorder(x.reshape(self.a.shape)).ravel())
            else:
                a = self.a
                asz = a.size
                ashape = a.shape
                key = self.unique_reorder_id()
                if key not in self.dr_lookup or key is None:
                    JS = self.reorder(np.arange(asz).reshape(ashape))
                    IS = np.arange(JS.size)
                    data = np.ones_like(IS)
                    shape = JS.shape
                    self.dr_lookup[key] = sp.csc_matrix((data, (IS, JS.ravel())), shape=(self.r.size, wrt.r.size))
                return self.dr_lookup[key]
Example #10
def fit(X,
        Y,
        sigma,
        lmbda,
        L_X,
        L_Y,
        cg_tol=1e-3,
        cg_maxiter=None,
        alpha0=None):
    if cg_maxiter is None:
        # CG needs at max dimension many iterations
        cg_maxiter = L_X.shape[0]

    NX = X.shape[0]

    # set up and solve regularised linear system via bicgstab
    # this never stores an NxN matrix
    b = compute_b(X, Y, L_X, L_Y, sigma)
    matvec = lambda v: apply_left_C(v, X, Y, L_X, L_Y, lmbda)
    C_operator = LinearOperator((NX, NX), matvec=matvec, dtype=np.float64)

    # for printing number of CG iterations
    global counter
    counter = 0

    def callback(x):
        global counter
        counter += 1

    # start optimisation from alpha0, if present
    if alpha0 is not None:
        logger.debug("Starting bicgstab from previous alpha0")
    solution, info = bicgstab(C_operator,
                              b,
                              tol=cg_tol,
                              maxiter=cg_maxiter,
                              callback=callback,
                              x0=alpha0)
    logger.debug("Ran bicgstab for %d iterations." % counter)
    if info > 0:
        logger.warning("Warning: CG not convergence in %.3f tolerance within %d iterations" % \
                       (cg_tol, cg_maxiter))
    a = -sigma / 2. * solution
    return a
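fit depends on project-specific helpers (compute_b, apply_left_C, a module logger and a global counter). A stripped-down sketch of the same bicgstab/LinearOperator/callback pattern, using an assumed diagonal toy operator:

import numpy as np
from scipy.sparse.linalg import LinearOperator, bicgstab

n = 50
d = np.linspace(1.0, 2.0, n)  # SPD diagonal operator, never stored as an n x n matrix
C_operator = LinearOperator((n, n), matvec=lambda v: d * v, dtype=np.float64)

b = np.ones(n)
iterations = 0

def callback(xk):
    global iterations
    iterations += 1

solution, info = bicgstab(C_operator, b, tol=1e-6, maxiter=n, callback=callback)
print(info, iterations, np.allclose(d * solution, b, atol=1e-4))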
Example #11
    def test_precond(self):
        """test whether all methods accept a trivial preconditioner"""

        tol = 1e-8

        def identity(b, which=None):
            """trivial preconditioner"""
            return b

        for solver, req_sym, req_pos in self.solvers:

            for A, sym, pos in self.cases:
                if req_sym and not sym: continue
                if req_pos and not pos: continue

                M, N = A.shape
                D = spdiags([1.0 / A.diagonal()], [0], M, N)

                b = arange(A.shape[0], dtype=float)
                x0 = 0 * b

                precond = LinearOperator(A.shape, identity, rmatvec=identity)

                if solver == qmr:
                    x, info = solver(A,
                                     b,
                                     M1=precond,
                                     M2=precond,
                                     x0=x0,
                                     tol=tol)
                else:
                    x, info = solver(A, b, M=precond, x0=x0, tol=tol)
                assert_equal(info, 0)
                assert (norm(b - A * x) < tol * norm(b))

                A = A.copy()
                A.psolve = identity
                A.rpsolve = identity

                x, info = solver(A, b, x0=x0, tol=tol)
                assert_equal(info, 0)
                assert (norm(b - A * x) < tol * norm(b))
Example #12
def score_matching_low_rank(X, Y, sigma, lmbda, L_X, L_Y,
                            cg_tol=1e-3,
                            cg_maxiter=None):
    if cg_maxiter is None:
        # CG needs at max dimension many iterations
        cg_maxiter = L_X.shape[0]

    NX = X.shape[0]

    # set up and solve regularised linear system via bicgstab
    # this never stores an NxN matrix
    b = _compute_b_low_rank(X, Y, L_X, L_Y, sigma)
    matvec = lambda v: _apply_left_C_low_rank(v, X, Y, L_X, L_Y, lmbda)
    C_operator = LinearOperator((NX, NX), matvec=matvec, dtype=np.float64)
    solution, info = bicgstab(C_operator, b, tol=cg_tol, maxiter=cg_maxiter)
    if info > 0:
        print("Warning: CG not terminated within specified %d iterations" % cg_maxiter)
    a = -sigma / 2. * solution

    return a
Example #13
    def inv(self):
        """Construct the matrix inverse of this operator.

        Returns
        -------
        LinearOperator

        Raises
        ------
        ValueError
            if the instance is acyclic.
        """
        if not self._is_cyclic:
            raise NotImplementedError("HomogeneousIsotropicCorrelation.inv "
                                      "does not support acyclic correlations")
        # TODO: Return a HomogeneousIsotropicLinearOperator
        return LinearOperator(shape=self.shape,
                              dtype=self.dtype,
                              matvec=self.solve,
                              rmatvec=self.solve)
Example #14
def fit_sym(Z, sigma, lmbda, L,
            cg_tol=1e-3,
            cg_maxiter=None):
    if cg_maxiter is None:
        # CG needs at max dimension many iterations
        cg_maxiter = L.shape[0]

    N = Z.shape[0]

    # set up and solve regularised linear system via bicgstab
    # this never stores an NxN matrix
    b = compute_b_sym(Z, L, sigma)
    matvec = lambda v: apply_left_C_sym(v, Z, L, lmbda)
    C_operator = LinearOperator((N, N), matvec=matvec, dtype=np.float64)
    solution, info = bicgstab(C_operator, b, tol=cg_tol, maxiter=cg_maxiter)
    if info > 0:
        print("Warning: CG not terminated within specified %d iterations" % cg_maxiter)
    a = -sigma / 2. * solution

    return a
Example #15
def invert_sparse_mat_splu(A, **kwargs):
    ''' 
    given a sparse matrix A, return a LinearOperator whose
    application is A-inverse by way of splu
    '''

    Acsc = A.tocsc()
    Acsc.eliminate_zeros()
    nonzero_cols = ((Acsc.indptr[:-1] - Acsc.indptr[1:]) != 0).nonzero()[0]
    Map = scipy.sparse.eye(Acsc.shape[0], Acsc.shape[1], format='csc')
    Map = Map[:, nonzero_cols]
    Acsc = Map.T.tocsc() * Acsc * Map
    LU = scipy.sparse.linalg.splu(Acsc)
    LU_Map = Map

    def matvec(b):
        return LU_Map * LU.solve(numpy.ravel(LU_Map.T * b))

    Ainv = LinearOperator(A.shape, matvec=matvec, dtype=A.dtype)

    return Ainv
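A hedged usage sketch for invert_sparse_mat_splu; the tridiagonal test matrix and the use of the returned operator as a cg preconditioner are assumptions for illustration (the imports below also cover the names the function itself expects).

import numpy
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import LinearOperator, cg

n = 200
dat = numpy.ones(n)
A = scipy.sparse.spdiags([-dat, 2 * dat, -dat], [-1, 0, 1], n, n).tocsr()
b = numpy.arange(n, dtype=float)

Ainv = invert_sparse_mat_splu(A)  # LinearOperator applying A^{-1} via splu
x_direct = Ainv * b               # direct application of the inverse
x_cg, info = cg(A, b, M=Ainv)     # or use it as a preconditioner
print(info, numpy.linalg.norm(A @ x_direct - b))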
Example #16
    def dr_wrt(self, wrt, reverse_mode=False, profiler=None):
        tm_dr_wrt = timer()
        self.called_dr_wrt = True
        self._call_on_changed()

        drs = []

        if wrt in self._cache['drs']:
            if DEBUG:
                if wrt not in self._cache_info:
                    self._cache_info[wrt] = 0
                self._cache_info[wrt] += 1
                self._status = 'cached'
            return self._cache['drs'][wrt]

        direct_dr = self._compute_dr_wrt_sliced(wrt)

        if direct_dr is not None:
            drs.append(direct_dr)

        if DEBUG:
            self._status = 'pending'

        propnames = set(_props_for(self.__class__))
        for k in set(self.dterms).intersection(
                propnames.union(set(self.__dict__.keys()))):

            p = getattr(self, k)

            if hasattr(p, 'dterms') and p is not wrt:

                indirect_dr = None

                if reverse_mode:
                    lhs = self._compute_dr_wrt_sliced(p)
                    if isinstance(lhs, LinearOperator):
                        tm_dr_wrt.pause()
                        dr2 = p.dr_wrt(wrt)
                        tm_dr_wrt.resume()
                        indirect_dr = lhs.matmat(dr2) if dr2 is not None else None
                    else:
                        indirect_dr = p.lmult_wrt(lhs, wrt)
                else:  # forward mode
                    tm_dr_wrt.pause()
                    dr2 = p.dr_wrt(wrt, profiler=profiler)
                    tm_dr_wrt.resume()
                    if dr2 is not None:
                        indirect_dr = self.compute_rop(p, rhs=dr2)

                if indirect_dr is not None:
                    drs.append(indirect_dr)

        if len(drs) == 0:
            result = None
        elif len(drs) == 1:
            result = drs[0]
        else:
            # TODO: ????????
            # result = np.sum(x for x in drs)
            if not np.any([isinstance(a, LinearOperator) for a in drs]):
                result = reduce(lambda x, y: x + y, drs)
            else:
                result = LinearOperator(
                    drs[0].shape,
                    lambda x: reduce(lambda a, b: a.dot(x) + b.dot(x), drs))

        # TODO: figure out how/whether to do this.
        if result is not None and not sp.issparse(result):
            tm_nonzero = timer()
            nonzero = np.count_nonzero(result)
            if tm_nonzero() > 0.1:
                pif('count_nonzero in {}sec'.format(tm_nonzero()))
            if nonzero == 0 or hasattr(
                    result, 'size') and result.size / float(nonzero) >= 10.0:
                tm_convert_to_sparse = timer()
                result = sp.csc_matrix(result)
                import gc
                gc.collect()
                pif('converting result to sparse in {}sec'.format(
                    tm_convert_to_sparse()))

        if (result is not None) and (not sp.issparse(result)) and (
                not isinstance(result, LinearOperator)):
            result = np.atleast_2d(result)

        # When the number of parents is one, it indicates that
        # caching this is probably not useful because not
        # more than one parent will likely ask for this same
        # thing again in the same iteration of an optimization.
        #
        # When the number of parents is zero, this is the top
        # level object and should be cached; when it's > 1
        # cache the combinations of the children.
        #
        # If we *always* filled in the cache, it would require
        # more memory but would occasionally save a little cpu,
        # on average.
        if len(list(self._parents.keys())) != 1:
            self._cache['drs'][wrt] = result

        if DEBUG:
            self._status = 'done'

        if getattr(self, '_make_dense', False) and sp.issparse(result):
            result = result.todense()
        if getattr(self, '_make_sparse', False) and not sp.issparse(result):
            result = sp.csc_matrix(result)

        if tm_dr_wrt() > 0.1:
            pif('dx of {} wrt {} in {}sec, sparse: {}'.format(
                self.short_name, wrt.short_name, tm_dr_wrt(),
                sp.issparse(result)))

        return result
Example #17
def test_mpi_p2p_alldata_gather():
    N = 2
    comm = MPI.COMM_WORLD
    world_rank = comm.Get_rank()
    world_size = comm.Get_size()
    print(comm)
    print("world rank ", world_rank)

    group = comm.Get_group()
    newgroup = group.Excl([0])
    newcomm = comm.Create(newgroup)

    # PROCESS 0:
    # Receive fenics_mesh from PROCESS 1 (fenics mesh distributed
    # across PROCESS 1 and PROCESS 2.

    if world_rank == 0:
        assert newcomm == MPI.COMM_NULL
        info = MPI.Status()

        # BM_CELLS
        comm.Probe(MPI.ANY_SOURCE, 100, info)
        elements = info.Get_elements(MPI.LONG)
        bm_cells = np.zeros(elements, dtype=np.int64)
        comm.Recv([bm_cells, MPI.LONG], source=1, tag=100)
        bm_cells = bm_cells.reshape(int(elements / 3), 3)

        # BM_COORDS
        comm.Probe(MPI.ANY_SOURCE, 101, info)
        elements = info.Get_elements(MPI.DOUBLE)
        bm_coords = np.zeros(elements, dtype=np.float64)
        comm.Recv([bm_coords, MPI.DOUBLE], source=1, tag=101)
        bm_coords = bm_coords.reshape(int(elements / 3), 3)

        # BM_NODES
        comm.Probe(MPI.ANY_SOURCE, 102, info)
        elements = info.Get_elements(MPI.INT)
        bm_nodes = np.zeros(elements, dtype=np.int32)
        comm.Recv([bm_nodes, MPI.INT], source=1, tag=102)
        # print(bm_nodes)
        # print(bm_coords)
        # print(bm_cells)
        # use boundary coords and boundary triangles to create bempp mesh.

        # BM_DOFMAP

        num_fenics_vertices = comm.recv(source=1, tag=103)
        print("num_vertices ", num_fenics_vertices)

        print("length of bm_nodes ", len(bm_nodes))

        # b_vertices_from_vertices = coo_matrix(
        #     (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),
        #     shape=(len(bm_nodes), num_fenics_vertices),
        #     dtype="float64",
        # ).tocsc()

        dof_to_vertex_map = np.zeros(27, dtype=np.int64)

        b_vertices_from_vertices = coo_matrix(
            (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),
            shape=(len(bm_nodes), 27),
            dtype="float64",
        ).tocsc()

        dof_to_vertex_map = np.arange(27, dtype=np.int64)

        print(dof_to_vertex_map)

        vertices_from_fenics_dofs = coo_matrix(
            (
                np.ones(27),
                (dof_to_vertex_map, np.arange(27)),
            ),
            shape=(27, 27),
            dtype="float64",
        ).tocsc()

        # receive A from fenics processes.

        comm.Probe(MPI.ANY_SOURCE, 112, info)
        elements = info.Get_elements(MPI.DOUBLE_COMPLEX)
        av = np.zeros(elements, dtype=np.cdouble)
        comm.Recv([av, MPI.DOUBLE_COMPLEX], source=1, tag=112)
        # print(av)

        comm.Probe(MPI.ANY_SOURCE, 111, info)
        elements = info.Get_elements(MPI.INT)
        aj = np.zeros(elements, dtype=np.int32)
        comm.Recv([aj, MPI.INT], source=1, tag=111)
        # print(aj)
        comm.Probe(MPI.ANY_SOURCE, 110, info)
        elements = info.Get_elements(MPI.INT)
        ai = np.zeros(elements, dtype=np.int32)
        comm.Recv([ai, MPI.INT], source=1, tag=110)
        # print("ai shape ", ai)

        k = 2

        bempp_boundary_grid = bempp.api.Grid(bm_coords.transpose(),
                                             bm_cells.transpose())
        space = bempp.api.function_space(bempp_boundary_grid, "P", 1)
        trace_space = space
        trace_matrix = b_vertices_from_vertices @ vertices_from_fenics_dofs
        bempp_space = bempp.api.function_space(trace_space.grid, "DP", 0)

        id_op = bempp.api.operators.boundary.sparse.identity(
            trace_space, bempp_space, bempp_space)
        mass = bempp.api.operators.boundary.sparse.identity(
            bempp_space, bempp_space, trace_space)
        dlp = bempp.api.operators.boundary.helmholtz.double_layer(
            trace_space, bempp_space, bempp_space, k)
        slp = bempp.api.operators.boundary.helmholtz.single_layer(
            bempp_space, bempp_space, bempp_space, k)

        rhs_fem = np.zeros(27)

        print("length of rhs fem ", len(rhs_fem))

        @bempp.api.complex_callable
        def u_inc(x, n, domain_index, result):
            result[0] = np.exp(1j * k * x[0])

        u_inc = bempp.api.GridFunction(bempp_space, fun=u_inc)
        rhs_bem = u_inc.projections(bempp_space)

        rhs = np.concatenate([rhs_fem, rhs_bem])

        from bempp.api.assembly.blocked_operator import BlockedDiscreteOperator
        from scipy.sparse.linalg.interface import LinearOperator

        blocks = [[None, None], [None, None]]

        trace_op = LinearOperator(trace_matrix.shape,
                                  lambda x: trace_matrix @ x)

        Asp = csr_matrix((av, aj, ai))

        blocks[0][0] = Asp
        blocks[0][1] = -trace_matrix.T * mass.weak_form().A
        blocks[1][0] = (0.5 * id_op - dlp).weak_form() * trace_op
        blocks[1][1] = slp.weak_form()

        blocked = BlockedDiscreteOperator(np.array(blocks))

        from scipy.sparse.linalg import gmres

        c = Counter()
        soln, info = gmres(blocked, rhs, callback=c.add)

        print("Solved in", c.count, "iterations")
        # computed = soln[: fenics_space.dim]

        print(soln)

        # print(actual)
        # print("L2 error:", np.linalg.norm(actual_vec - computed))
        # assert np.linalg.norm(actual_vec - computed) < 1 / N

        # dof_to_vertex_map = np.zeros(num_fenics_vertices, dtype=np.int64)
        # tets = fenics_mesh.geometry.dofmap
        # for tet in range(tets.num_nodes):
        #     cell_dofs = fenics_space.dofmap.cell_dofs(tet)
        #     cell_verts = tets.links(tet)
        #     for v in range(4):
        #         vertex_n = cell_verts[v]
        #         dof = cell_dofs[fenics_space.dofmap.dof_layout.entity_dofs(0, v)[0]]
        #         dof_to_vertex_map[dof] = vertex_n
        # print("dof_to_vertex_map ", dof_to_vertex_map)
        # vertices_from_fenics_dofs = coo_matrix(
        #     (
        #         np.ones(num_fenics_vertices),
        #         (dof_to_vertex_map, np.arange(num_fenics_vertices)),
        #     ),
        #     shape=(num_fenics_vertices, num_fenics_vertices),
        #     dtype="float64",
        # ).tocsc()

        # tets = fenics_mesh.geometry.dofmap
        # for tet in range(tets.num_nodes):
        #     cell_dofs = fenics_space.dofmap.cell_dofs(tet)
        #     cell_verts = tets.links(tet)
        #     for v in range(4):
        #         vertex_n = cell_verts[v]
        #         dof = cell_dofs[fenics_space.dofmap.dof_layout.entity_dofs(0, v)[0]]
        #         dof_to_vertex_map[dof] = vertex_n
        # print(dof_to_vertex_map)
        # vertices_from_fenics_dofs = coo_matrix(
        #     (
        #         np.ones(num_fenics_vertices),
        #         (dof_to_vertex_map, np.arange(num_fenics_vertices)),
        #     ),
        #     shape=(num_fenics_vertices, num_fenics_vertices),
        #     dtype="float64",
        # ).tocsc()

        # # Get trace matrix by multiplication
        # trace_matrix = b_vertices_from_vertices @ vertices_from_fenics_dofs

        # # Now return everything
        # return space, trace_matrix

        # out = os.path.join("./bempp_out", "test_mesh.msh")
        # bempp.api.export(out, grid=bempp_boundary_grid)
        # print("exported mesh to", out)

    else:  # world rank = 1, 2
        fenics_mesh = dolfinx.UnitCubeMesh(newcomm, N, N, N)
        with XDMFFile(newcomm, "box.xdmf", "w") as file:
            file.write_mesh(fenics_mesh)

        fenics_space = dolfinx.FunctionSpace(fenics_mesh, ("CG", 1))

        u = ufl.TrialFunction(fenics_space)
        v = ufl.TestFunction(fenics_space)
        k = 2

        form = (ufl.inner(ufl.grad(u), ufl.grad(v)) -
                k**2 * ufl.inner(u, v)) * ufl.dx

        bm_nodes_global, bm_coords, boundary = bm_from_fenics_mesh_mpi(
            fenics_mesh, fenics_space)
        A = dolfinx.fem.assemble_matrix(form)
        A.assemble()
        ai, aj, av = A.getValuesCSR()
        Asp = csr_matrix((av, aj, ai))
        print(Asp)
        # Asp_array = Asp.toarray()
        # Asp_1 = csr_matrix(Asp_array)
        # assert Asp_1.all() == Asp.all()
        # print(Asp_1)
        # print(Asp)

        bm_nodes_global_list = list(bm_nodes_global)
        bm_nodes_arr = np.asarray(bm_nodes_global_list, dtype=np.int64)
        sendbuf_bdry = boundary
        sendbuf_coords = bm_coords
        sendbuf_nodes = bm_nodes_arr
        recvbuf_boundary = None
        recvbuf_coords = None
        recvbuf_nodes = None

        rank = newcomm.Get_rank()
        # number cols = total num rows?
        print("PRINT counts ")
        print("ai, {}\n aj, {}\n av {}\n ".format(ai.shape, aj.shape,
                                                  av.shape))
        # print("sendbuf_bdry", len(sendbuf_bdry), rank)
        # print("bm_coords ", len(sendbuf_coords), rank)
        # print("nodes ", len(sendbuf_nodes), rank)
        # print("Asp array ", Asp_array.shape)
        # print("Asp ", Asp.shape)
        # print("av ", av)
        # print("av ", av[0])

        # print("ai ", len(ai))
        print("ai \n", ai)
        print("aj \n", aj)
        print("av \n", av)

        # send A
        sendbuf_ai = ai
        sendbuf_aj = aj
        sendbuf_av = av
        root = 0
        sendcounts = np.array(newcomm.gather(len(sendbuf_av), root))
        sendcounts_ai = np.array(newcomm.gather(len(sendbuf_ai), root))
        print(aj)
        # print(sendcounts)

        if newcomm.rank == root:
            print("sendcounts: {}, total: {}".format(sendcounts,
                                                     sum(sendcounts)))
            recvbuf_av = np.empty(sum(sendcounts), dtype=np.cdouble)
            recvbuf_aj = np.empty(sum(sendcounts), dtype=np.int32)
            recvbuf_ai = np.empty(sum(sendcounts_ai), dtype=np.int32)
        else:
            recvbuf_av = None
            recvbuf_aj = None
            recvbuf_ai = None

        # Allocate memory for gathered data on subprocess 0.
        if newcomm.rank == 0:
            info = MPI.Status()
            # The 3 factor corresponds to fact that the array is concatenated
            recvbuf_boundary = np.empty(newcomm.size * len(boundary) * 3,
                                        dtype=np.int32)
            recvbuf_coords = np.empty(newcomm.size * len(bm_coords) * 3,
                                      dtype=np.float64)
            recvbuf_nodes = np.empty(newcomm.size * len(bm_nodes_arr),
                                     dtype=np.int64)
            # recvbuf_dofs = np.empty(newcomm.size * len(bm_dofs))
            # recvbuf_soln = np.empty(newcomm.size*

        # newcomm.Gather(sendbuf_av, recvbuf_av, root=0)
        newcomm.Gatherv(sendbuf_ai,
                        recvbuf=(recvbuf_ai, sendcounts_ai),
                        root=0)
        newcomm.Gatherv(sendbuf=sendbuf_av,
                        recvbuf=(recvbuf_av, sendcounts),
                        root=root)
        newcomm.Gatherv(sendbuf=sendbuf_aj,
                        recvbuf=(recvbuf_aj, sendcounts),
                        root=root)

        # Receive on subprocess 0.
        newcomm.Gather(sendbuf_bdry, recvbuf_boundary, root=0)
        newcomm.Gather(sendbuf_coords, recvbuf_coords, root=0)
        newcomm.Gather(sendbuf_nodes, recvbuf_nodes, root=0)

        # exit(0)
        # this needs to be done - but not essential
        FEniCS_dofs_to_vertices(newcomm, fenics_space, fenics_mesh)
        # print(fenics_space.dim)
        print(fenics_space.dofmap.index_map.global_indices(False))
        print(len(fenics_space.dofmap.index_map.global_indices(False)))
        actual = dolfinx.Function(fenics_space)
        print("actual ", actual)
        actual.interpolate(lambda x: np.exp(1j * k * x[0]))
        actual_vec = actual.vector[:]
        print("actual vec \n ", actual_vec)
        print("actual vec size\n ", actual_vec.size)

        # newcomm.Gather(actual_vec, recvbuf_
        # when we do the gather we get boundary node indices repetitions
        # therefore we find unique nodes in the gathered array.
        if newcomm.rank == 0:
            all_boundary = recvbuf_boundary.reshape(
                int(len(recvbuf_boundary) / 3), 3)  # 48 (48)
            bm_coords = recvbuf_coords.reshape(int(len(recvbuf_coords) / 3),
                                               3)  # 34 (26)
            bm_nodes = recvbuf_nodes  # 34 (26)
            # print(len(bm_nodes))
            # print(len(all_boundary))
            # print(len(bm_coords))

            # Sort the nodes (on global geom node indices) to make the unique faster?
            sorted_indices = recvbuf_nodes.argsort()
            bm_nodes_sorted = recvbuf_nodes[sorted_indices]
            bm_coords_sorted = bm_coords[sorted_indices]
            # print("sorted indices, ", sorted_indices)

            bm_nodes, unique = np.unique(bm_nodes_sorted, return_index=True)
            bm_coords = bm_coords_sorted[unique]
            bm_nodes_list = list(bm_nodes)
            # print("bm_nodes_list", bm_nodes_list)
            # bm_cells - remap boundary triangle indices between 0-len(bm_nodes) - this can be improved
            bm_cells = np.array([[bm_nodes_list.index(i) for i in tri]
                                 for tri in all_boundary])

            #             print("received ai ", recvbuf_ai)
            #             print("received aj ", recvbuf_aj)
            #             print("received av ", recvbuf_av)

            #             # now process ai, aj and av.
            #             print("sendcounts ", sendcounts)
            #             print("sendcounts_ai", sendcounts_ai)

            end = sendcounts_ai[0]
            print("end ", end)
            new_recvbuf_ai = np.delete(recvbuf_ai, end)
            new_recvbuf_ai[end:] += new_recvbuf_ai[end - 1]
            print(new_recvbuf_ai)

            # print(len(bm_nodes))
            # print(len(bm_cells))
            # print(len(bm_coords))
            # print(len(all_boundary))
            # print(bm_cells)
            # send to world process 0.
            comm.Send([bm_cells, MPI.LONG], dest=0, tag=100)
            comm.Send([bm_coords, MPI.DOUBLE], dest=0, tag=101)
            comm.Send([np.array(bm_nodes, np.int32), MPI.LONG],
                      dest=0,
                      tag=102)

            # send ai, aj, av

            num_fenics_vertices = fenics_mesh.topology.connectivity(
                0, 0).num_nodes

            comm.send(num_fenics_vertices, dest=0, tag=103)

            comm.Send([new_recvbuf_ai, MPI.INT], dest=0, tag=110)
            print("aj ", recvbuf_aj.shape)
            print("aj ", new_recvbuf_ai.shape)
            comm.Send([recvbuf_aj, MPI.INT], dest=0, tag=111)
            comm.Send([recvbuf_av, MPI.DOUBLE_COMPLEX], dest=0, tag=112)

            print("num_fenics_vertices ", num_fenics_vertices)
Example #18
File: utils.py Project: YautongNg/gengis
def make_system(A, M, x0, b, xtype=None):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, None}
        initial guess to iterative method
    b : array_like
        right hand side
    xtype : {'f', 'd', 'F', 'D', None}
        dtype of the x vector

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' %
                         (A.shape, ))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N, )):
        raise ValueError('A and b have incompatible dimensions')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b, matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if xtype is None:
        if hasattr(A, 'dtype'):
            xtype = A.dtype.char
        else:
            xtype = A.matvec(b).dtype.char
        xtype = coerce(xtype, b.dtype.char)
    else:
        warn(
            'Use of xtype argument is deprecated. '
            'Use LinearOperator( ... , dtype=xtype) instead.',
            DeprecationWarning)
        if xtype == 0:
            xtype = b.dtype.char
        else:
            if xtype not in 'fdFD':
                raise ValueError("xtype must be 'f', 'd', 'F', or 'D'")

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N, )):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()

    # process preconditioner
    if M is None:
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape,
                               matvec=psolve,
                               rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    return A, M, x, b, postprocess
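A hedged sketch of what make_system normalizes, assuming the function above and its module helpers (aslinearoperator, IdentityOperator, coerce, ...) are in scope; the 2x2 system below is an illustrative assumption.

import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = [1, 2]   # integer list: upcast to double and raveled
x0 = None    # no initial guess: a zero vector of matching dtype is created

A_op, M, x, b_arr, postprocess = make_system(A, None, x0, b)
print(type(A_op).__name__, type(M).__name__)  # LinearOperator wrapper, identity preconditioner
print(x, b_arr.dtype)                         # [0. 0.] float64
print(postprocess(x).shape)                   # reshaped to match b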
Example #19
def qmr(A,
        b,
        x0=None,
        tol=1e-5,
        maxiter=None,
        M1=None,
        M2=None,
        callback=None,
        atol=None):
    """Use Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0  : {array, matrix}
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is ``'legacy'``, which emulates
        a different legacy behavior.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, dense matrix, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, dense matrix, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1*A*M2 should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.

    See Also
    --------
    LinearOperator

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import qmr
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = qmr(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b)

    if M1 is None and M2 is None:
        if hasattr(A_, 'psolve'):

            def left_psolve(b):
                return A_.psolve(b, 'left')

            def right_psolve(b):
                return A_.psolve(b, 'right')

            def left_rpsolve(b):
                return A_.rpsolve(b, 'left')

            def right_rpsolve(b):
                return A_.rpsolve(b, 'right')

            M1 = LinearOperator(A.shape,
                                matvec=left_psolve,
                                rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape,
                                matvec=right_psolve,
                                rmatvec=right_rpsolve)
        else:

            def id(b):
                return b

            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n * 10

    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')

    get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
    if atol == 'exit':
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(11 * n, x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        slice1 = slice(ndx1 - 1, ndx1 - 1 + n)
        slice2 = slice(ndx2 - 1, ndx2 - 1 + n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(work[slice1])
        elif (ijob == 2):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.rmatvec(work[slice1])
        elif (ijob == 3):
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(x)
        elif (ijob == 8):
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
Example #20
def qmr(A,
        b,
        x0=None,
        tol=1e-5,
        maxiter=None,
        xtype=None,
        M1=None,
        M2=None,
        callback=None):
    """Use Quasi-Minimal Residual iteration to solve A x = b

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        It is required that the linear operator can produce
        ``Ax`` and ``A^T x``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0  : {array, matrix}
        Starting guess for the solution.
    tol : float
        Tolerance to achieve. The algorithm terminates when either the relative
        or the absolute residual is below `tol`.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, dense matrix, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, dense matrix, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1*A*M2 should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    xtype : {'f','d','F','D'}
        This parameter is DEPRECATED -- avoid using it.

        The type of the result.  If None, then it will be determined from
        A.dtype.char and b.  If A does not have a typecode method then it
        will compute A.matvec(x0) to get a typecode.   To save the extra
        computation when A does not have a typecode attribute use xtype=0
        for the same type as b or use xtype='f','d','F',or 'D'.
        This parameter has been superseded by LinearOperator.

    See Also
    --------
    LinearOperator

    """
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b, xtype)

    if M1 is None and M2 is None:
        if hasattr(A_, 'psolve'):

            def left_psolve(b):
                return A_.psolve(b, 'left')

            def right_psolve(b):
                return A_.psolve(b, 'right')

            def left_rpsolve(b):
                return A_.rpsolve(b, 'left')

            def right_rpsolve(b):
                return A_.rpsolve(b, 'right')

            M1 = LinearOperator(A.shape,
                                matvec=left_psolve,
                                rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape,
                                matvec=right_psolve,
                                rmatvec=right_rpsolve)
        else:

            def id(b):
                return b

            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n * 10

    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')
    stoptest = getattr(_iterative, ltr + 'stoptest2')

    resid = tol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(11 * n, x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    bnrm2 = -1.0
    iter_ = maxiter
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        slice1 = slice(ndx1 - 1, ndx1 - 1 + n)
        slice2 = slice(ndx2 - 1, ndx2 - 1 + n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(work[slice1])
        elif (ijob == 2):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.rmatvec(work[slice1])
        elif (ijob == 3):
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            work[slice2] *= sclr2
            work[slice2] += sclr1 * A.matvec(x)
        elif (ijob == 8):
            if ftflag:
                info = -1
                ftflag = False
            bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
        ijob = 2

    if info > 0 and iter_ == maxiter and resid > tol:
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
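Since this qmr requires an operator that can produce both Ax and A^T x, a hedged usage sketch with a LinearOperator supplying matvec and rmatvec follows; the diagonally dominant test matrix and the tolerances are assumptions.

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import LinearOperator, qmr

n = 100
dat = np.ones(n)
A_mat = spdiags([-dat, 3 * dat, -2 * dat], [-1, 0, 1], n, n).tocsr()

A = LinearOperator(A_mat.shape,
                   matvec=lambda v: A_mat @ v,     # A x
                   rmatvec=lambda v: A_mat.T @ v)  # A^T x
b = np.arange(n, dtype=float)

x, info = qmr(A, b, tol=1e-8)
print(info, np.linalg.norm(A_mat @ x - b) <= 1e-6 * np.linalg.norm(b))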
Example #21
def make_system(A, M, x0, b):

    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError(
            'expected square matrix, but got shape=%s' % (A.shape,))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N,)):
        raise ValueError('A and b have incompatible dimensions')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b, matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if hasattr(A, 'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N,)):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()

    # process preconditioner
    if M is None:
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    return A, M, x, b, postprocess
Example #22
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
         maxiter=None, return_singular_vectors=True,
         solver='arpack'):
    """Compute the largest or smallest k singular values/vectors for a sparse matrix. The order of the singular values is not guaranteed.

    Parameters
    ----------
    A : {sparse matrix, LinearOperator}
        Array to compute the SVD on, of shape (M, N)
    k : int, optional
        Number of singular values and vectors to compute.
        Must be 1 <= k < min(A.shape).
    ncv : int, optional
        The number of Lanczos vectors generated
        ncv must be greater than k+1 and smaller than n;
        it is recommended that ncv > 2*k
        Default: ``min(n, max(2*k + 1, 20))``
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : str, ['LM' | 'SM'], optional
        Which `k` singular values to find:

            - 'LM' : largest singular values
            - 'SM' : smallest singular values

        .. versionadded:: 0.12.0
    v0 : ndarray, optional
        Starting vector for iteration, of length min(A.shape). Should be an
        (approximate) left singular vector if N > M and a right singular
        vector otherwise.
        Default: random

        .. versionadded:: 0.12.0
    maxiter : int, optional
        Maximum number of iterations.

        .. versionadded:: 0.12.0
    return_singular_vectors : bool or str, optional
        - True: return singular vectors (True) in addition to singular values.

        .. versionadded:: 0.12.0

        - "u": only return the u matrix, without computing vh (if N > M).
        - "vh": only return the vh matrix, without computing u (if N <= M).

        .. versionadded:: 0.16.0
    solver : str, optional
            Eigenvalue solver to use. Should be 'arpack' or 'lobpcg'.
            Default: 'arpack'

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
        If `return_singular_vectors` is "vh", this variable is not computed,
        and None is returned instead.
    s : ndarray, shape=(k,)
        The singular values.
    vt : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
        If `return_singular_vectors` is "u", this variable is not computed,
        and None is returned instead.


    Notes
    -----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on A.H * A or A * A.H, depending on which one is more efficient.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import svds, eigs
    >>> A = csc_matrix([[1, 0, 0], [5, 0, 2], [0, -1, 0], [0, 0, 3]], dtype=float)
    >>> u, s, vt = svds(A, k=2)
    >>> s
    array([ 2.75193379,  5.6059665 ])
    >>> np.sqrt(eigs(A.dot(A.T), k=2)[0]).real
    array([ 5.6059665 ,  2.75193379])
    """
    if which == 'LM':
        largest = True
    elif which == 'SM':
        largest = False
    else:
        raise ValueError("which must be either 'LM' or 'SM'.")

    if not (isinstance(A, LinearOperator) or isspmatrix(A) or is_pydata_spmatrix(A)):
        A = np.asarray(A)

    n, m = A.shape

    if k <= 0 or k >= min(n, m):
        raise ValueError("k must be between 1 and min(A.shape), k=%d" % k)

    if isinstance(A, LinearOperator):
        if n > m:
            X_dot = A.matvec
            X_matmat = A.matmat
            XH_dot = A.rmatvec
            XH_mat = A.rmatmat
            transpose = False
        else:
            X_dot = A.rmatvec
            X_matmat = A.rmatmat
            XH_dot = A.matvec
            XH_mat = A.matmat

            dtype = getattr(A, 'dtype', None)
            if dtype is None:
                dtype = A.dot(np.zeros([m, 1])).dtype
            transpose = True

    else:
        if n > m:
            X_dot = X_matmat = A.dot
            XH_dot = XH_mat = _herm(A).dot
            transpose = False
        else:
            XH_dot = XH_mat = A.dot
            X_dot = X_matmat = _herm(A).dot
            transpose = True

    def matvec_XH_X(x):
        return XH_dot(X_dot(x))

    def matmat_XH_X(x):
        return XH_mat(X_matmat(x))

    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
                          matmat=matmat_XH_X,
                          shape=(min(A.shape), min(A.shape)))

    # Get a low rank approximation of the implicitly defined gramian matrix.
    # This is not a stable way to approach the problem.
    if solver == 'lobpcg':

        if k == 1 and v0 is not None:
            X = np.reshape(v0, (-1, 1))
        else:
            X = np.random.RandomState(52).randn(min(A.shape), k)

        eigvals, eigvec = lobpcg(XH_X, X, tol=tol, maxiter=maxiter,
                                 largest=largest)

    elif solver == 'arpack' or solver is None:
        eigvals, eigvec = eigsh(XH_X, k=k, tol=tol, maxiter=maxiter,
                                ncv=ncv, which=which, v0=v0)

    else:
        raise ValueError("solver must be either 'arpack', or 'lobpcg'.")

    u = X_matmat(eigvec)
    if not return_singular_vectors:
        s = svd(u, compute_uv=False)
        return s[::-1]

    # compute the right singular vectors of X and update the left ones accordingly
    u, s, vh = svd(u, full_matrices=False)
    u = u[:, ::-1]
    s = s[::-1]
    vh = vh[::-1]
    return_u = (return_singular_vectors == 'u')
    return_vh = (return_singular_vectors == 'vh')
    if not transpose:
        if return_vh:
            u = None
        if return_u:
            vh = None
        else:
            vh = vh @ _herm(eigvec)
        return u, s, vh
    else:
        if return_u:
            u = eigvec @ _herm(vh)
            return u, s, None
        if return_vh:
            return None, s, _herm(u)
        u, vh = eigvec @ _herm(vh), _herm(u)
        return u, s, vh
Example #23
File: _svds.py Project: szkafander/scipy
def svds(A,
         k=6,
         ncv=None,
         tol=0,
         which='LM',
         v0=None,
         maxiter=None,
         return_singular_vectors=True,
         solver='arpack',
         random_state=None,
         options=None):
    """
    Partial singular value decomposition of a sparse matrix.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
        ``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
    ncv : int, optional
        When ``solver='arpack'``, this is the number of Lanczos vectors
        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
        When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
        ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    maxiter : int, optional
        Maximum number of iterations; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.

        If ``solver='propack'``, the option is respected regardless of the
        matrix shape.

    solver : {'arpack', 'propack', 'lobpcg'}, optional
        The solver used.
        :ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
        :ref:`'propack' <sparse.linalg.svds-propack>` are supported.
        Default: `'arpack'`.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate random
        starting vectors when they are not provided.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3)
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5)
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True

    """
    rs_was_None = random_state is None  # avoid changing v0 for arpack/lobpcg

    args = _iv(A, k, ncv, tol, which, v0, maxiter, return_singular_vectors,
               solver, random_state)
    (A, k, ncv, tol, which, v0, maxiter, return_singular_vectors, solver,
     random_state) = args

    largest = (which == 'LM')
    n, m = A.shape

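    # Orient the problem so that the Gramian handed to the eigensolver is the
    # smaller of A^H A (when n > m) and A A^H (otherwise), of size min(A.shape).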
    if n > m:
        X_dot = A.matvec
        X_matmat = A.matmat
        XH_dot = A.rmatvec
        XH_mat = A.rmatmat
    else:
        X_dot = A.rmatvec
        X_matmat = A.rmatmat
        XH_dot = A.matvec
        XH_mat = A.matmat

        dtype = getattr(A, 'dtype', None)
        if dtype is None:
            dtype = A.dot(np.zeros([m, 1])).dtype

    def matvec_XH_X(x):
        return XH_dot(X_dot(x))

    def matmat_XH_X(x):
        return XH_mat(X_matmat(x))

    XH_X = LinearOperator(matvec=matvec_XH_X,
                          dtype=A.dtype,
                          matmat=matmat_XH_X,
                          shape=(min(A.shape), min(A.shape)))

    # Get a low rank approximation of the implicitly defined gramian matrix.
    # This is not a stable way to approach the problem.
    if solver == 'lobpcg':

        if k == 1 and v0 is not None:
            X = np.reshape(v0, (-1, 1))
        else:
            if rs_was_None:
                X = np.random.RandomState(52).randn(min(A.shape), k)
            else:
                X = random_state.uniform(size=(min(A.shape), k))

        eigvals, eigvec = lobpcg(
            XH_X,
            X,
            tol=tol**2,
            maxiter=maxiter,
            largest=largest,
        )

    elif solver == 'propack':
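        # PROPACK computes singular triplets of A directly (no Gramian detour),
        # so it is handled separately and returns early.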
        jobu = return_singular_vectors in {True, 'u'}
        jobv = return_singular_vectors in {True, 'vh'}
        irl_mode = (which == 'SM')
        res = _svdp(A,
                    k=k,
                    tol=tol**2,
                    which=which,
                    maxiter=None,
                    compute_u=jobu,
                    compute_v=jobv,
                    irl_mode=irl_mode,
                    kmax=maxiter,
                    v0=v0,
                    random_state=random_state)

        u, s, vh, _ = res  # but we'll ignore bnd, the last output

        # PROPACK order appears to be largest first. `svds` output order is not
        # guaranteed, according to documentation, but for ARPACK and LOBPCG
        # they actually are ordered smallest to largest, so reverse for
        # consistency.
        s = s[::-1]
        u = u[:, ::-1]
        vh = vh[::-1]

        u = u if jobu else None
        vh = vh if jobv else None

        if return_singular_vectors:
            return u, s, vh
        else:
            return s

    elif solver == 'arpack' or solver is None:
        if v0 is None and not rs_was_None:
            v0 = random_state.uniform(size=(min(A.shape), ))
        eigvals, eigvec = eigsh(XH_X,
                                k=k,
                                tol=tol**2,
                                maxiter=maxiter,
                                ncv=ncv,
                                which=which,
                                v0=v0)

    # Gramian matrices have real non-negative eigenvalues.
    eigvals = np.maximum(eigvals.real, 0)

    # Use the sophisticated detection of small eigenvalues from pinvh.
    t = eigvec.dtype.char.lower()
    factor = {'f': 1E3, 'd': 1E6}
    cond = factor[t] * np.finfo(t).eps
    cutoff = cond * np.max(eigvals)

    # Get a mask indicating which eigenpairs are not degenerately tiny,
    # and create the re-ordered array of thresholded singular values.
    above_cutoff = (eigvals > cutoff)
    nlarge = above_cutoff.sum()
    nsmall = k - nlarge
    slarge = np.sqrt(eigvals[above_cutoff])
    s = np.zeros_like(eigvals)
    s[:nlarge] = slarge
    if not return_singular_vectors:
        return np.sort(s)

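    # Map the retained Gramian eigenvectors back to singular vectors of A:
    # for n > m they are right singular vectors and u = A v / s; otherwise they
    # are left singular vectors and vh = (A^H u / s)^H.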
    if n > m:
        vlarge = eigvec[:, above_cutoff]
        ularge = (X_matmat(vlarge) /
                  slarge if return_singular_vectors != 'vh' else None)
        vhlarge = _herm(vlarge)
    else:
        ularge = eigvec[:, above_cutoff]
        vhlarge = (_herm(X_matmat(ularge) /
                         slarge) if return_singular_vectors != 'u' else None)

    u = (_augmented_orthonormal_cols(ularge, nsmall, random_state)
         if ularge is not None else None)
    vh = (_augmented_orthonormal_rows(vhlarge, nsmall, random_state)
          if vhlarge is not None else None)

    indexes_sorted = np.argsort(s)
    s = s[indexes_sorted]
    if u is not None:
        u = u[:, indexes_sorted]
    if vh is not None:
        vh = vh[indexes_sorted]

    return u, s, vh
Example #24
0
def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, str, None}
        initial guess to iterative method.
        ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.
        Default is `None`, which means using the zero initial guess.
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' %
                         (A.shape, ))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N, )):
        raise ValueError('shapes of A {} and b {} are incompatible'.format(
            A.shape, b.shape))

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b, matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if hasattr(A, 'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    # process preconditioner
    if M is None:
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape,
                               matvec=psolve,
                               rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    # set initial guess
    if x0 is None:
        x = zeros(N, dtype=xtype)
    elif isinstance(x0, str):
        if x0 == 'Mb':  # use nonzero initial guess ``M @ b``
            bCopy = b.copy()
            x = M.matvec(bCopy)
        else:
            raise ValueError(f"unknown initial guess string {x0!r}; "
                             "the only supported value is 'Mb'")
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N, )):
            raise ValueError(f'shapes of A {A.shape} and '
                             f'x0 {x.shape} are incompatible')
        x = x.ravel()

    return A, M, x, b, postprocess
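
# A minimal usage sketch, not part of the original module: it assumes the
# module-level names make_system relies on (aslinearoperator, IdentityOperator,
# zeros, asarray, coerce, ...) are imported as in scipy's iterative-solver
# utilities. The pattern is: normalize A, M, x0 and b once, iterate on flat
# vectors, then apply the returned `postprocess` hook to the solution.
import numpy as np
from scipy.sparse import csr_matrix

A_demo = csr_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
b_demo = np.array([[1.0], [2.0]])               # (N, 1) right-hand side
A_op, M_op, x, b_vec, postprocess = make_system(A_demo, None, None, b_demo)
x = x + M_op.matvec(b_vec - A_op.matvec(x))     # one preconditioned Richardson step
x = postprocess(x)                              # convert back for the caller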
Example #25
0
File: _svds.py Project: yacth/scipy
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
         maxiter=None, return_singular_vectors=True,
         solver='arpack', options=None):
    """
    Partial singular value decomposition of a sparse matrix.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k < min(M, N)``.
    ncv : int, optional
        When ``solver='arpack'``, this is the number of Lanczos vectors
        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
        When ``solver='lobpcg'``, this parameter is ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>` or
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`) for details.
    maxiter : int, optional
        Maximum number of iterations; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>` or
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`) for details.
    return_singular_vectors : bool or str, optional
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: only return the left singular vectors, without computing the
          right singular vectors (if ``N > M``).
        - ``"vh"``: only return the right singular vectors, without computing
          the left singular vectors (if ``N <= M``).

    solver : str, optional
        The solver used.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` are supported.
        Default: `'arpack'`.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
        If `return_singular_vectors` is ``"vh"``, this variable is not
        computed, and ``None`` is returned instead.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
        If `return_singular_vectors` is ``"u"``, this variable is not computed,
        and ``None`` is returned instead.

    Notes
    -----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3)
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.todense(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5)
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.todense())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign. Consequently, the
    returned arrays of singular vectors are also orthogonal.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.todense())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.todense())))
    True

    """
    if which == 'LM':
        largest = True
    elif which == 'SM':
        largest = False
    else:
        raise ValueError("which must be either 'LM' or 'SM'.")

    if (not (isinstance(A, LinearOperator) or isspmatrix(A)
             or is_pydata_spmatrix(A))):
        A = np.asarray(A)

    n, m = A.shape

    if k <= 0 or k >= min(n, m):
        raise ValueError("k must be between 1 and min(A.shape), k=%d" % k)

    if isinstance(A, LinearOperator):
        if n > m:
            X_dot = A.matvec
            X_matmat = A.matmat
            XH_dot = A.rmatvec
            XH_mat = A.rmatmat
        else:
            X_dot = A.rmatvec
            X_matmat = A.rmatmat
            XH_dot = A.matvec
            XH_mat = A.matmat

            dtype = getattr(A, 'dtype', None)
            if dtype is None:
                dtype = A.dot(np.zeros([m, 1])).dtype

    else:
        if n > m:
            X_dot = X_matmat = A.dot
            XH_dot = XH_mat = _herm(A).dot
        else:
            XH_dot = XH_mat = A.dot
            X_dot = X_matmat = _herm(A).dot

    def matvec_XH_X(x):
        return XH_dot(X_dot(x))

    def matmat_XH_X(x):
        return XH_mat(X_matmat(x))

    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
                          matmat=matmat_XH_X,
                          shape=(min(A.shape), min(A.shape)))

    # Get a low rank approximation of the implicitly defined gramian matrix.
    # This is not a stable way to approach the problem.
    if solver == 'lobpcg':

        if k == 1 and v0 is not None:
            X = np.reshape(v0, (-1, 1))
        else:
            X = np.random.RandomState(52).randn(min(A.shape), k)

        eigvals, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
                                 largest=largest)

    elif solver == 'arpack' or solver is None:
        eigvals, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
                                ncv=ncv, which=which, v0=v0)

    else:
        raise ValueError("solver must be either 'arpack', or 'lobpcg'.")

    # Gramian matrices have real non-negative eigenvalues.
    eigvals = np.maximum(eigvals.real, 0)

    # Use the sophisticated detection of small eigenvalues from pinvh.
    t = eigvec.dtype.char.lower()
    factor = {'f': 1E3, 'd': 1E6}
    cond = factor[t] * np.finfo(t).eps
    cutoff = cond * np.max(eigvals)

    # Get a mask indicating which eigenpairs are not degenerately tiny,
    # and create the re-ordered array of thresholded singular values.
    above_cutoff = (eigvals > cutoff)
    nlarge = above_cutoff.sum()
    nsmall = k - nlarge
    slarge = np.sqrt(eigvals[above_cutoff])
    s = np.zeros_like(eigvals)
    s[:nlarge] = slarge
    if not return_singular_vectors:
        return np.sort(s)

    if n > m:
        vlarge = eigvec[:, above_cutoff]
        ularge = (X_matmat(vlarge) / slarge
                  if return_singular_vectors != 'vh' else None)
        vhlarge = _herm(vlarge)
    else:
        ularge = eigvec[:, above_cutoff]
        vhlarge = (_herm(X_matmat(ularge) / slarge)
                   if return_singular_vectors != 'u' else None)

    u = (_augmented_orthonormal_cols(ularge, nsmall)
         if ularge is not None else None)
    vh = (_augmented_orthonormal_rows(vhlarge, nsmall)
          if vhlarge is not None else None)

    indexes_sorted = np.argsort(s)
    s = s[indexes_sorted]
    if u is not None:
        u = u[:, indexes_sorted]
    if vh is not None:
        vh = vh[indexes_sorted]

    return u, s, vh
Example #26
0
File: ch.py Project: pujades/chumpy
    def dr_wrt(self, wrt, reverse_mode=False):
        self._call_on_changed()

        drs = []        

        if wrt in self._cache['drs']:
            return self._cache['drs'][wrt]

        direct_dr = self._compute_dr_wrt_sliced(wrt)

        if direct_dr is not None:
            drs.append(direct_dr)                

        propnames = set(_props_for(self.__class__))
        for k in set(self.dterms).intersection(propnames.union(set(self.__dict__.keys()))):
            p = getattr(self, k)

            if hasattr(p, 'dterms') and p is not wrt:

                indirect_dr = None

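                # Chain rule through the intermediate term p: reverse mode multiplies
                # d(self)/d(p) onto d(p)/d(wrt) from the left, while forward mode
                # pushes d(p)/d(wrt) through this node's R-operator (compute_rop).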
                if reverse_mode:
                    lhs = self._compute_dr_wrt_sliced(p)
                    if isinstance(lhs, LinearOperator):
                        dr2 = p.dr_wrt(wrt)
                        indirect_dr = lhs.matmat(dr2) if dr2 is not None else None
                    else:
                        indirect_dr = p.lmult_wrt(lhs, wrt)
                else: # forward mode
                    dr2 = p.dr_wrt(wrt)
                    if dr2 is not None:
                        indirect_dr = self.compute_rop(p, rhs=dr2)

                if indirect_dr is not None:
                    drs.append(indirect_dr)

        if len(drs)==0:
            result = None

        elif len(drs)==1:
            result = drs[0]

        else:
            if not np.any([isinstance(a, LinearOperator) for a in drs]):
                result = reduce(lambda x, y: x+y, drs)
            else:
                result = LinearOperator(drs[0].shape, lambda x : reduce(lambda a, b: a.dot(x)+b.dot(x),drs))

        # TODO: figure out how/whether to do this.
        # if result is not None and not sp.issparse(result):
        #    nonzero = np.count_nonzero(result)
        #    if nonzero > 0 and hasattr(result, 'size') and result.size / nonzero >= 10.0:
        #         #import pdb; pdb.set_trace()
        #         result = sp.csc_matrix(result)
            
            
        if (result is not None) and (not sp.issparse(result)) and (not isinstance(result, LinearOperator)):
            result = np.atleast_2d(result)
            
        # When the number of parents is one, it indicates that
        # caching this is probably not useful because not 
        # more than one parent will likely ask for this same
        # thing again in the same iteration of an optimization.
        #
        # If we *always* filled in the cache, it would require 
        # more memory but would occasionally save a little cpu,
        # on average.
        if len(self._parents.keys()) != 1:
            self._cache['drs'][wrt] = result

        return result
Example #27
0

Am = csr_matrix(array([[-2,1,0,0,0,9],
                       [1,-2,1,0,5,0],
                       [0,1,-2,1,0,0],
                       [0,0,1,-2,1,0],
                       [0,3,0,1,-2,1],
                       [1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = [0]


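# Wrap Am in a LinearOperator whose matvec increments `count`, so the tests below
# can check how many operator applications each solver configuration performs.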
def matvec(v):
    count[0] += 1
    return Am*v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)


def do_solve(**kw):
    count[0] = 0
    x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=6, tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
    return x0, count_0


class TestLGMRES(TestCase):
    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
Example #28
0
def relaxation_as_linear_operator(method, A, b):
    """Create a linear operator that applies a relaxation method to a right-hand-side.

    Parameters
    ----------
    method : {tuple or string}
        Relaxation descriptor: Each tuple must be of the form ('method', 'opts')
        where 'method' is the name of a supported smoother, e.g., gauss_seidel,
        and 'opts' a dict of keyword arguments to the smoother, e.g.,
        opts = {'sweep': 'symmetric'}.  If a string, it must name a supported
        smoother, e.g., gauss_seidel.
    A : sparse matrix
        Matrix to which the relaxation method is applied.
    b : array_like
        Fixed right-hand side used by the relaxation method.

    Returns
    -------
    linear operator that applies the relaxation method to a vector for a
    fixed right-hand-side, b.

    Notes
    -----
    This method is primarily used to improve B during the aggregation setup
    phase.  Here b = 0, and each relaxation call can improve the quality of B,
    especially near the boundaries.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.utils import relaxation_as_linear_operator
    >>> import numpy as np
    >>> A = poisson((100,100), format='csr')           # matrix
    >>> B = np.ones((A.shape[0],1))                 # Candidate vector
    >>> b = np.zeros((A.shape[0]))                  # RHS
    >>> relax = relaxation_as_linear_operator('gauss_seidel', A, b)
    >>> B = relax*B

    """
    def unpack_arg(v):
        if isinstance(v, tuple):
            return v[0], v[1]
        return v, {}

    # setup variables
    accepted_methods = [
        'gauss_seidel', 'block_gauss_seidel', 'sor', 'gauss_seidel_ne',
        'gauss_seidel_nr', 'jacobi', 'block_jacobi', 'richardson', 'schwarz',
        'strength_based_schwarz', 'jacobi_ne'
    ]

    b = np.array(b, dtype=A.dtype)
    fn, kwargs = unpack_arg(method)
    lvl = MultilevelSolver.Level()
    lvl.A = A

    # Retrieve setup call from relaxation.smoothing for this relaxation method
    if fn not in accepted_methods:
        raise NameError(f'invalid relaxation method: {fn}')
    try:
        setup_smoother = getattr(relaxation.smoothing, 'setup_' + fn)
    except AttributeError as e:
        raise NameError(f'invalid relaxation method: {fn}') from e

    # Get relaxation routine that takes only (A, x, b) as parameters
    relax = setup_smoother(lvl, **kwargs)

    # Define matvec
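    # Each application of the returned operator performs one relaxation sweep on a
    # copy of x, with the right-hand side b held fixed (b = 0 in aggregation setup).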
    def matvec(x):
        xcopy = x.copy()
        relax(A, xcopy, b)
        return xcopy

    return LinearOperator(A.shape, matvec, dtype=A.dtype)
Example #29
0
# $$
# \begin{bmatrix}
#     \mathsf{A}-k^2 \mathsf{M} & -\mathsf{M}_\Gamma\\
#     \tfrac{1}{2}\mathsf{Id}-\mathsf{K} & \mathsf{V}
# \end{bmatrix}.
# $$

# In[27]:


from bempp.api.assembly.blocked_operator import BlockedDiscreteOperator
from bempp.api.external.fenics import FenicsOperator
from scipy.sparse.linalg.interface import LinearOperator
blocks = [[None,None],[None,None]]

trace_op = LinearOperator(trace_matrix.shape, lambda x:trace_matrix @ x)

A = FenicsOperator((dolfin.inner(dolfin.nabla_grad(u), dolfin.nabla_grad(v))
                    - k**2 * n**2 * u * v) * dolfin.dx)

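# Fill the blocks to match the 2x2 operator displayed above: the FEM block,
# the coupling blocks built from the trace operator, and the boundary operators.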
blocks[0][0] = A.weak_form()
blocks[0][1] = -trace_matrix.T * mass.weak_form().A
blocks[1][0] = (.5 * id_op - dlp).weak_form() * trace_op
blocks[1][1] = slp.weak_form()

blocked = BlockedDiscreteOperator(np.array(blocks))


# Next, we solve the system, then split the solution into the parts associated with u and λ. For an efficient solve, preconditioning is required.
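# The next cell is only an illustrative sketch, not from the original notebook: it
# shows the kind of preconditioned GMRES solve the text describes, with an identity
# placeholder for the preconditioner and an assumed assembled right-hand side `rhs`.
# The split relies on the first trace_matrix.shape[1] entries of the solution being
# the FEM coefficients of u, with the remaining entries the BEM coefficients of λ.

import numpy as np
from scipy.sparse.linalg import gmres, LinearOperator

P = LinearOperator(blocked.shape, lambda x: x)   # placeholder identity preconditioner
rhs = np.zeros(blocked.shape[0])                 # assumed assembled right-hand side
sol, info = gmres(blocked, rhs, M=P, tol=1e-5)   # info == 0 signals convergence

n_fem = trace_matrix.shape[1]                    # number of FEM degrees of freedom
u_coeffs, lambda_coeffs = sol[:n_fem], sol[n_fem:]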
Example #30
0
print("assembling the boundary operators")

# set up the BEM boundary operators
sl = bempp.api.operators.boundary.laplace.single_layer(bem_dc, bem_c, bem_dc)
dl = bempp.api.operators.boundary.laplace.double_layer(bem_c, bem_c, bem_dc)
id_op = bempp.api.operators.boundary.sparse.identity(bem_dc, bem_dc, bem_c)
id_op2 = bempp.api.operators.boundary.sparse.identity(bem_c, bem_c, bem_dc)

block = np.empty((2, 2), dtype=object)
block[0, 0] = ngbem.NgOperator(a)
block[0, 1] = -trace_matrix.T * id_op.weak_form().sparse_operator

from scipy.sparse.linalg.interface import LinearOperator

trace_op = LinearOperator(trace_matrix.shape, lambda x: trace_matrix * x)
rhs_op1 = 0.5 * id_op2 - dl
block[1, 0] = rhs_op1.weak_form() * trace_op
block[1, 1] = sl.weak_form()
blockOp = bempp.api.BlockedDiscreteOperator(block)

# set up a block-diagonal preconditioner
p_block = np.empty((2, 2), dtype=object)
p_block[0, 0] = ngbem.NgOperator(c, a)
p_block[1, 1] = bempp.api.InverseSparseDiscreteBoundaryOperator(
    bempp.api.operators.boundary.sparse.identity(
        bem_dc, bem_dc, bem_dc).weak_form())  # np.identity(bem_dc.global_dof_count)

p_blockOp = bempp.api.BlockedDiscreteOperator(p_block)