def test_preconditioner(self):
    # Using an (approximate) inverse of Am as a preconditioner must
    # cut the matvec count well below the unpreconditioned run while
    # producing the same solution.
    lu = splu(Am.tocsc())
    precond = LinearOperator(matvec=lu.solve, shape=A.shape, dtype=A.dtype)

    x_plain, n_plain = do_solve()
    x_prec, n_prec = do_solve(M=precond)

    assert_equal(n_prec, 3)
    assert_(n_prec < n_plain / 2)
    assert_(allclose(x_prec, x_plain, rtol=1e-14))
def time_mikota(self, n, solver):
    # Benchmark: either LOBPCG with a Cholesky-based preconditioner,
    # or a dense generalized eigensolve, on the Mikota pair.
    m = 10
    if solver == 'lobpcg':
        X = orth(rand(n, m))
        LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
        apply_precond = partial(_precond, LorU, lower)
        M = LinearOperator(self.shape,
                           matvec=apply_precond,
                           matmat=apply_precond)
        lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
    else:
        eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m - 1))
def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, str, None}
        initial guess to iterative method.
        ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.
        Default is `None`, which means using the zero initial guess.
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    Raises
    ------
    ValueError
        If ``A`` is not square, the shapes of ``A`` and ``b`` (or ``M``,
        or ``x0``) are incompatible, or ``x0`` is an unrecognized string.
    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N,)):
        raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
                         'incompatible')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        return x

    # Pick a common floating-point dtype for x and b; fall back to probing
    # A with a matvec when A carries no dtype of its own.
    if hasattr(A, 'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    # process preconditioner
    if M is None:
        # Legacy hook: A may carry psolve/rpsolve methods acting as a
        # preconditioner; otherwise use the identity.
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    # set initial guess
    if x0 is None:
        x = zeros(N, dtype=xtype)
    elif isinstance(x0, str):
        if x0 == 'Mb':  # use nonzero initial guess ``M @ b``
            b_copy = b.copy()
            x = M.matvec(b_copy)
        else:
            # Previously any other string silently left ``x`` unbound,
            # producing an UnboundLocalError; fail with a clear message.
            raise ValueError(f"unknown initial guess string {x0!r}; "
                             "the only recognized value is 'Mb'")
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N,)):
            raise ValueError(f'shapes of A {A.shape} and '
                             f'x0 {x.shape} are incompatible')
        x = x.ravel()

    return A, M, x, b, postprocess
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
        atol=None):
    """Use Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0 : ndarray
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is ``'legacy'``, which emulates
        a different legacy behavior.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, ndarray, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, ndarray, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1@A@M2 should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.

    See Also
    --------
    LinearOperator

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import qmr
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = qmr(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    # Keep a handle on the original A: its optional psolve/rpsolve methods
    # (legacy preconditioner hooks) are read below.
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b)

    if M1 is None and M2 is None:
        if hasattr(A_, 'psolve'):
            # Split A's two-sided psolve/rpsolve hooks into separate left
            # and right preconditioner operators.
            def left_psolve(b):
                return A_.psolve(b, 'left')

            def right_psolve(b):
                return A_.psolve(b, 'right')

            def left_rpsolve(b):
                return A_.rpsolve(b, 'left')

            def right_rpsolve(b):
                return A_.rpsolve(b, 'right')
            M1 = LinearOperator(A.shape,
                                matvec=left_psolve,
                                rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape,
                                matvec=right_psolve,
                                rmatvec=right_rpsolve)
        else:
            # No preconditioning: both sides default to the identity.
            def id(b):
                return b
            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    # Select the Fortran reverse-communication routine matching x's dtype
    # (e.g. 'd' -> dqmrrevcom).
    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')

    get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
    if atol == 'exit':
        # Initial guess already satisfies the tolerance.
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(11*n, x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    # Reverse-communication loop: revcom returns an ijob code telling us
    # which operation to apply to slices of its workspace, then we call
    # it again with ijob=2 ("operation done") until it signals completion.
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        # ndx1/ndx2 are 1-based Fortran offsets into the workspace.
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            # Iteration finished (converged, broke down, or hit maxiter).
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            # work[slice2] := sclr2*work[slice2] + sclr1*A @ work[slice1]
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(work[slice1])
        elif (ijob == 2):
            # Same update with A^T (rmatvec).
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.rmatvec(work[slice1])
        elif (ijob == 3):
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            # Residual-style update against the current solution x.
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(x)
        elif (ijob == 8):
            if ftflag:
                # First stopping test: mark info so _stoptest initializes.
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
         maxiter=None, return_singular_vectors=True,
         solver='arpack', random_state=None, options=None):
    """
    Partial singular value decomposition of a sparse matrix.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
        ``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
    ncv : int, optional
        When ``solver='arpack'``, this is the number of Lanczos vectors
        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
        When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
        ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    maxiter : int, optional
        Maximum number of iterations; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.

        If ``solver='propack'``, the option is respected regardless of the
        matrix shape.

    solver :  {'arpack', 'propack', 'lobpcg'}, optional
        The solver used.
        :ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
        :ref:`'propack' <sparse.linalg.svds-propack>` are supported.
        Default: `'arpack'`.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3)
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5)
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    rs_was_None = random_state is None  # avoid changing v0 for arpack/lobpcg

    # _iv validates/normalizes all arguments (defined elsewhere in this file).
    args = _iv(A, k, ncv, tol, which, v0, maxiter,
               return_singular_vectors, solver, random_state)
    (A, k, ncv, tol, which, v0, maxiter,
     return_singular_vectors, solver, random_state) = args

    largest = (which == 'LM')
    n, m = A.shape

    # Work with the smaller of A^H A / A A^H: X denotes the "tall" side.
    if n > m:
        X_dot = A.matvec
        X_matmat = A.matmat
        XH_dot = A.rmatvec
        XH_mat = A.rmatmat
    else:
        X_dot = A.rmatvec
        X_matmat = A.rmatmat
        XH_dot = A.matvec
        XH_mat = A.matmat

    dtype = getattr(A, 'dtype', None)
    if dtype is None:
        # Probe A with a matvec to discover its result dtype.
        dtype = A.dot(np.zeros([m, 1])).dtype

    def matvec_XH_X(x):
        return XH_dot(X_dot(x))

    def matmat_XH_X(x):
        return XH_mat(X_matmat(x))

    # Implicit Gramian operator of size min(M, N).
    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
                          matmat=matmat_XH_X,
                          shape=(min(A.shape), min(A.shape)))

    # Get a low rank approximation of the implicitly defined gramian matrix.
    # This is not a stable way to approach the problem.
    if solver == 'lobpcg':

        if k == 1 and v0 is not None:
            X = np.reshape(v0, (-1, 1))
        else:
            if rs_was_None:
                X = np.random.RandomState(52).randn(min(A.shape), k)
            else:
                X = random_state.uniform(size=(min(A.shape), k))

        eigvals, eigvec = lobpcg(XH_X, X, tol=tol**2, maxiter=maxiter,
                                 largest=largest,)

    elif solver == 'propack':
        jobu = return_singular_vectors in {True, 'u'}
        jobv = return_singular_vectors in {True, 'vh'}
        irl_mode = (which == 'SM')
        res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
                    compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
                    kmax=maxiter, v0=v0, random_state=random_state)

        u, s, vh, _ = res  # but we'll ignore bnd, the last output

        # PROPACK order appears to be largest first. `svds` output order is
        # not guaranteed, according to documentation, but for ARPACK and
        # LOBPCG they actually are ordered smallest to largest, so reverse
        # for consistency.
        s = s[::-1]
        u = u[:, ::-1]
        vh = vh[::-1]

        u = u if jobu else None
        vh = vh if jobv else None

        if return_singular_vectors:
            return u, s, vh
        else:
            return s

    elif solver == 'arpack' or solver is None:
        if v0 is None and not rs_was_None:
            v0 = random_state.uniform(size=(min(A.shape),))
        eigvals, eigvec = eigsh(XH_X, k=k, tol=tol**2, maxiter=maxiter,
                                ncv=ncv, which=which, v0=v0)

    # Gramian matrices have real non-negative eigenvalues.
    eigvals = np.maximum(eigvals.real, 0)

    # Use the sophisticated detection of small eigenvalues from pinvh.
    t = eigvec.dtype.char.lower()
    factor = {'f': 1E3, 'd': 1E6}
    cond = factor[t] * np.finfo(t).eps
    cutoff = cond * np.max(eigvals)

    # Get a mask indicating which eigenpairs are not degenerately tiny,
    # and create the re-ordered array of thresholded singular values.
    above_cutoff = (eigvals > cutoff)
    nlarge = above_cutoff.sum()
    nsmall = k - nlarge
    slarge = np.sqrt(eigvals[above_cutoff])
    s = np.zeros_like(eigvals)
    s[:nlarge] = slarge
    if not return_singular_vectors:
        return np.sort(s)

    # Recover the singular vectors on the other side of A via X @ v / sigma,
    # computing only the side(s) requested.
    if n > m:
        vlarge = eigvec[:, above_cutoff]
        ularge = (X_matmat(vlarge) / slarge
                  if return_singular_vectors != 'vh' else None)
        vhlarge = _herm(vlarge)
    else:
        ularge = eigvec[:, above_cutoff]
        vhlarge = (_herm(X_matmat(ularge) / slarge)
                   if return_singular_vectors != 'u' else None)

    # Pad the degenerately-tiny directions with extra orthonormal columns.
    u = (_augmented_orthonormal_cols(ularge, nsmall, random_state)
         if ularge is not None else None)
    vh = (_augmented_orthonormal_rows(vhlarge, nsmall, random_state)
          if vhlarge is not None else None)

    indexes_sorted = np.argsort(s)
    s = s[indexes_sorted]
    if u is not None:
        u = u[:, indexes_sorted]
    if vh is not None:
        vh = vh[indexes_sorted]

    return u, s, vh
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
         maxiter=None, return_singular_vectors=True,
         solver='arpack', random_state=None, options=None):
    """
    Partial singular value decomposition of a sparse matrix.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : ndarray, sparse matrix, or LinearOperator
        Matrix to decompose of a floating point numeric dtype.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
        ``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
    ncv : int, optional
        When ``solver='arpack'``, this is the number of Lanczos vectors
        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
        When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
        ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    maxiter : int, optional
        Maximum number of iterations; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.

        If ``solver='propack'``, the option is respected regardless of the
        matrix shape.

    solver :  {'arpack', 'propack', 'lobpcg'}, optional
        The solver used.
        :ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
        :ref:`'propack' <sparse.linalg.svds-propack>` are supported.
        Default: `'arpack'`.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient, followed by the Rayleigh-Ritz method as postprocessing; see
    https://w.wiki/4zms

    Alternatively, the PROPACK solver can be called. ``form="array"``

    Choices of the input matrix ``A`` numeric dtype may be limited.
    Only ``solver="lobpcg"`` supports all floating point dtypes
    real: 'np.single', 'np.double', 'np.longdouble' and
    complex: 'np.csingle', 'np.cdouble', 'np.clongdouble'.
    The ``solver="arpack"`` supports only
    'np.single', 'np.double', and 'np.cdouble'.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3)
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5)
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    rs_was_None = random_state is None  # avoid changing v0 for arpack/lobpcg

    # _iv validates/normalizes all arguments (defined elsewhere in this file).
    args = _iv(A, k, ncv, tol, which, v0, maxiter,
               return_singular_vectors, solver, random_state)
    (A, k, ncv, tol, which, v0, maxiter,
     return_singular_vectors, solver, random_state) = args

    largest = (which == 'LM')
    n, m = A.shape

    # Work with the smaller of A^H A / A A^H: X denotes the "tall" side;
    # ``transpose`` records which side the eigenvectors belong to.
    if n > m:
        X_dot = A.matvec
        X_matmat = A.matmat
        XH_dot = A.rmatvec
        XH_mat = A.rmatmat
        transpose = False
    else:
        X_dot = A.rmatvec
        X_matmat = A.rmatmat
        XH_dot = A.matvec
        XH_mat = A.matmat
        transpose = True

    dtype = getattr(A, 'dtype', None)
    if dtype is None:
        # Probe A with a matvec to discover its result dtype.
        dtype = A.dot(np.zeros([m, 1])).dtype

    def matvec_XH_X(x):
        return XH_dot(X_dot(x))

    def matmat_XH_X(x):
        return XH_mat(X_matmat(x))

    # Implicit Gramian operator of size min(M, N).
    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
                          matmat=matmat_XH_X,
                          shape=(min(A.shape), min(A.shape)))

    # Get a low rank approximation of the implicitly defined gramian matrix.
    # This is not a stable way to approach the problem.
    if solver == 'lobpcg':

        if k == 1 and v0 is not None:
            X = np.reshape(v0, (-1, 1))
        else:
            if rs_was_None:
                X = np.random.RandomState(52).randn(min(A.shape), k)
            else:
                X = random_state.uniform(size=(min(A.shape), k))

        _, eigvec = lobpcg(XH_X, X, tol=tol**2, maxiter=maxiter,
                           largest=largest)
        # lobpcg does not guarantee exactly orthonormal eigenvectors
        eigvec, _ = np.linalg.qr(eigvec)

    elif solver == 'propack':
        if not HAS_PROPACK:
            raise ValueError("`solver='propack'` is opt-in due "
                             "to potential issues on Windows, "
                             "it can be enabled by setting the "
                             "`SCIPY_USE_PROPACK` environment "
                             "variable before importing scipy")
        jobu = return_singular_vectors in {True, 'u'}
        jobv = return_singular_vectors in {True, 'vh'}
        irl_mode = (which == 'SM')
        res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
                    compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
                    kmax=maxiter, v0=v0, random_state=random_state)

        u, s, vh, _ = res  # but we'll ignore bnd, the last output

        # PROPACK order appears to be largest first. `svds` output order is
        # not guaranteed, according to documentation, but for ARPACK and
        # LOBPCG they actually are ordered smallest to largest, so reverse
        # for consistency.
        s = s[::-1]
        u = u[:, ::-1]
        vh = vh[::-1]

        u = u if jobu else None
        vh = vh if jobv else None

        if return_singular_vectors:
            return u, s, vh
        else:
            return s

    elif solver == 'arpack' or solver is None:
        if v0 is None and not rs_was_None:
            v0 = random_state.uniform(size=(min(A.shape),))
        _, eigvec = eigsh(XH_X, k=k, tol=tol**2, maxiter=maxiter,
                          ncv=ncv, which=which, v0=v0)

    # Rayleigh-Ritz postprocessing: a dense SVD of X @ eigvec yields the
    # singular values and one side's singular vectors directly.
    Av = X_matmat(eigvec)
    if not return_singular_vectors:
        s = svd(Av, compute_uv=False, overwrite_a=True)
        return s[::-1]

    # compute the left singular vectors of X and update the right ones
    # accordingly
    u, s, vh = svd(Av, full_matrices=False, overwrite_a=True)
    # Reverse to smallest-first ordering, matching the ARPACK/LOBPCG output
    # convention noted above.
    u = u[:, ::-1]
    s = s[::-1]
    vh = vh[::-1]

    jobu = return_singular_vectors in {True, 'u'}
    jobv = return_singular_vectors in {True, 'vh'}

    if transpose:
        # Eigenvectors belong to A's row space: swap the roles of u and vh.
        u_tmp = eigvec @ _herm(vh) if jobu else None
        vh = _herm(u) if jobv else None
        u = u_tmp
    else:
        if not jobu:
            u = None
        vh = vh @ _herm(eigvec) if jobv else None

    return u, s, vh
from scipy.sparse.linalg import splu
from scipy.sparse.linalg._isolve import gcrotmk, gmres


# Small non-symmetric test matrix shared by the tests below.
Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
                       [1, -2, 1, 0, 5, 0],
                       [0, 1, -2, 1, 0, 0],
                       [0, 0, 1, -2, 1, 0],
                       [0, 3, 0, 1, -2, 1],
                       [1, 0, 0, 0, 1, -2]]))
b = array([1, 2, 3, 4, 5, 6])
# Mutable one-element counter so the closure below can record matvec calls.
count = [0]


def matvec(v):
    # Count every product with Am so tests can measure solver work.
    count[0] += 1
    return Am @ v


A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)


def do_solve(**kw):
    # Solve A x = b with gcrotmk, returning the solution and the number of
    # matvecs it needed; extra keyword args are forwarded to the solver.
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A @ x0, b, rtol=1e-12, atol=1e-12), norm(A @ x0 - b))
    return x0, count_0


class TestGCROTMK:
    def test_preconditioner(self):
        # Check that preconditioning works