def test_dtypes_of_operator_sum():
    # gh-6078
    mat_complex = np.random.rand(2, 2) + 1j * np.random.rand(2, 2)
    mat_real = np.random.rand(2, 2)

    complex_operator = interface.aslinearoperator(mat_complex)
    real_operator = interface.aslinearoperator(mat_real)

    sum_complex = complex_operator + complex_operator
    sum_real = real_operator + real_operator

    assert_equal(sum_real.dtype, np.float64)
    assert_equal(sum_complex.dtype, np.complex128)
def testComplexX0(self):
    A = 4 * eye(self.n) + ones((self.n, self.n))
    xtrue = transpose(arange(self.n, 0, -1))
    b = aslinearoperator(A).matvec(xtrue)
    x0 = zeros(self.n, dtype=complex)
    x = lsmr(A, b, x0=x0)[0]
    assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def setup_method(self):
    self.n = 10
    self.A = lowerBidiagonalMatrix(20, self.n)
    self.xtrue = transpose(arange(self.n, 0, -1))
    self.Afun = aslinearoperator(self.A)
    self.b = self.Afun.matvec(self.xtrue)
    self.x0 = ones(self.n)
    self.x00 = self.x0.copy()
    self.returnValues = lsmr(self.A, self.b)
    self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)
def test_transpose_noconjugate():
    X = np.array([[1j]])
    A = interface.aslinearoperator(X)
    B = 1j * A
    Y = 1j * X

    v = np.array([1])

    assert_equal(B.dot(v), Y.dot(v))
    assert_equal(B.T.dot(v), Y.T.dot(v))
def test_adjoint_conjugate():
    X = np.array([[1j]])
    A = interface.aslinearoperator(X)
    B = 1j * A
    Y = 1j * X

    v = np.array([1])

    assert_equal(B.dot(v), Y.dot(v))
    assert_equal(B.H.dot(v), Y.T.conj().dot(v))
def test_dot(self):
    for M, A_array in self.cases:
        A = interface.aslinearoperator(M)
        M, N = A.shape

        x0 = np.array([1, 2, 3])
        x1 = np.array([[1], [2], [3]])
        x2 = np.array([[1, 4], [2, 5], [3, 6]])

        assert_equal(A.dot(x0), A_array.dot(x0))
        assert_equal(A.dot(x1), A_array.dot(x1))
        assert_equal(A.dot(x2), A_array.dot(x2))
def test_attributes():
    A = interface.aslinearoperator(np.arange(16).reshape(4, 4))

    def always_four_ones(x):
        x = np.asarray(x)
        assert_(x.shape == (3,) or x.shape == (3, 1))
        return np.ones(4)

    B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)

    for op in [A, B, A * B, A.H, A + A, B + B, A**4]:
        assert_(hasattr(op, "dtype"))
        assert_(hasattr(op, "shape"))
        assert_(hasattr(op, "_matvec"))
def test_basic(self):
    for M, A_array in self.cases:
        A = interface.aslinearoperator(M)
        M, N = A.shape

        xs = [np.array([1, 2, 3]),
              np.array([[1], [2], [3]])]
        ys = [np.array([1, 2]), np.array([[1], [2]])]

        if A.dtype == np.complex_:
            xs += [np.array([1, 2j, 3j]),
                   np.array([[1], [2j], [3j]])]
            ys += [np.array([1, 2j]), np.array([[1], [2j]])]

        x2 = np.array([[1, 4], [2, 5], [3, 6]])

        for x in xs:
            assert_equal(A.matvec(x), A_array.dot(x))
            assert_equal(A * x, A_array.dot(x))

        assert_equal(A.matmat(x2), A_array.dot(x2))
        assert_equal(A * x2, A_array.dot(x2))

        for y in ys:
            assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))
            assert_equal(A.T.matvec(y), A_array.T.dot(y))
            assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))

        for y in ys:
            if y.ndim < 2:
                continue
            assert_equal(A.rmatmat(y), A_array.T.conj().dot(y))
            assert_equal(A.T.matmat(y), A_array.T.dot(y))
            assert_equal(A.H.matmat(y), A_array.T.conj().dot(y))

        if hasattr(M, 'dtype'):
            assert_equal(A.dtype, M.dtype)

        assert_(hasattr(A, 'args'))
def lsmrtest(m, n, damp):
    """Verbose testing of lsmr"""

    A = lowerBidiagonalMatrix(m, n)
    xtrue = arange(n, 0, -1, dtype=float)
    Afun = aslinearoperator(A)

    b = Afun.matvec(xtrue)

    atol = 1.0e-7
    btol = 1.0e-7
    conlim = 1.0e+10
    itnlim = 10 * n
    show = 1

    x, istop, itn, normr, normar, norma, conda, normx \
        = lsmr(A, b, damp, atol, btol, conlim, itnlim, show)

    j1 = min(n, 5)
    j2 = max(n - 4, 1)
    print(' ')
    print('First elements of x:')
    str = ['%10.4f' % (xi) for xi in x[0:j1]]
    print(''.join(str))
    print(' ')
    print('Last elements of x:')
    str = ['%10.4f' % (xi) for xi in x[j2 - 1:]]
    print(''.join(str))

    r = b - Afun.matvec(x)
    r2 = sqrt(norm(r)**2 + (damp * norm(x))**2)
    print(' ')
    str = 'normr (est.)  %17.10e' % (normr)
    str2 = 'normr (true)  %17.10e' % (r2)
    print(str)
    print(str2)
    print(' ')
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
         maxiter=None, show=False, x0=None):
    """Iterative solver for least-squares problems.

    lsmr solves the system of linear equations ``Ax = b``. If the system
    is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
    ``A`` is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. ``b`` is a vector of length m.
    The matrix A may be dense or sparse (usually sparse).

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Matrix A in the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^H x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : array_like, shape (m,)
        Vector ``b`` in the linear system.
    damp : float
        Damping factor for regularized least-squares. `lsmr` solves
        the regularized least-squares problem::

         min ||(b) - (  A   )x||
             ||(0)   (damp*I) ||_2

        where damp is a scalar.  If damp is None or 0, the system
        is solved without regularization. Default is 0.
    atol, btol : float, optional
        Stopping tolerances. `lsmr` continues iterations until a
        certain backward error estimate is smaller than some quantity
        depending on atol and btol.  Let ``r = b - Ax`` be the
        residual vector for the current approximate solution ``x``.
        If ``Ax = b`` seems to be consistent, `lsmr` terminates
        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
        Otherwise, `lsmr` terminates when ``norm(A^H r) <=
        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6
        (default), the final ``norm(r)`` should be accurate to about
        6 digits. (The final ``x`` will usually have fewer correct digits,
        depending on ``cond(A)`` and the size of LAMBDA.)  If `atol`
        or `btol` is None, a default value of 1.0e-6 will be used.
        Ideally, they should be estimates of the relative error in the
        entries of ``A`` and ``b`` respectively.  For example, if the entries
        of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
        the algorithm from doing unnecessary work beyond the uncertainty of
        the input data.
    conlim : float, optional
        `lsmr` terminates if an estimate of ``cond(A)`` exceeds
        `conlim`.  For compatible systems ``Ax = b``, conlim could be
        as large as 1.0e+12 (say).  For least-squares problems,
        `conlim` should be less than 1.0e+8. If `conlim` is None, the
        default value is 1e+8.  Maximum precision can be obtained by
        setting ``atol = btol = conlim = 0``, but the number of
        iterations may then be excessive. Default is 1e8.
    maxiter : int, optional
        `lsmr` terminates if the number of iterations reaches
        `maxiter`.  The default is ``maxiter = min(m, n)``.  For
        ill-conditioned systems, a larger value of `maxiter` may be
        needed. Default is None.
    show : bool, optional
        Print iteration logs if ``show=True``. Default is False.
    x0 : array_like, shape (n,), optional
        Initial guess of ``x``, if None zeros are used. Default is None.

        .. versionadded:: 1.0.0

    Returns
    -------
    x : ndarray of float
        Least-squares solution returned.
    istop : int
        istop gives the reason for stopping::

          istop   = 0 means x=0 is a solution.  If x0 was given, then x=x0
                      is a solution.
                  = 1 means x is an approximate solution to A@x = b,
                      according to atol and btol.
                  = 2 means x approximately solves the least-squares problem
                      according to atol.
                  = 3 means COND(A) seems to be greater than CONLIM.
                  = 4 is the same as 1 with atol = btol = eps (machine
                      precision)
                  = 5 is the same as 2 with atol = eps.
                  = 6 is the same as 3 with CONLIM = 1/eps.
                  = 7 means ITN reached maxiter before the other stopping
                      conditions were satisfied.
    itn : int
        Number of iterations used.
    normr : float
        ``norm(b-Ax)``
    normar : float
        ``norm(A^H (b - Ax))``
    norma : float
        ``norm(A)``
    conda : float
        Condition number of A.
    normx : float
        ``norm(x)``

    Notes
    -----

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] D. C.-L. Fong and M. A. Saunders,
           "LSMR: An iterative algorithm for sparse least-squares problems",
           SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
           :arxiv:`1006.0758`
    .. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lsmr
    >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)

    The first example has the trivial solution ``[0, 0]``

    >>> b = np.array([0., 0., 0.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    0
    >>> x
    array([0., 0.])

    The stopping code `istop=0` returned indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains
    ``[0., 0.]``. The next example has a non-trivial solution:

    >>> b = np.array([1., 0., -1.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    1
    >>> x
    array([ 1., -1.])
    >>> itn
    1
    >>> normr
    4.440892098500627e-16

    As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
    limits. The given solution ``[1., -1.]`` obviously solves the equation.
    The remaining return values include information about the number of
    iterations (`itn=1`) and the remaining difference of left and right side
    of the solved equation.
    The final example demonstrates the behavior in the case where there is no
    solution for the equation:

    >>> b = np.array([1., 0.01, -1.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    2
    >>> x
    array([ 1.00333333, -0.99666667])
    >>> A.dot(x)-b
    array([ 0.00333333, -0.00333333,  0.00333333])
    >>> normr
    0.005773502691896255

    `istop` indicates that the system is inconsistent and thus `x` is rather
    an approximate solution to the corresponding least-squares problem.
    `normr` contains the minimal distance that was found.
    """

    A = aslinearoperator(A)
    b = atleast_1d(b)
    if b.ndim > 1:
        b = b.squeeze()

    msg = ('The exact solution is x = 0, or x = x0, if x0 was given   ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    hdg1 = '   itn      x(1)       norm r    norm Ar'
    hdg2 = ' compatible   LS      norm A   cond A'
    pfreq = 20   # print frequency (for repeating the heading)
    pcount = 0   # print counter

    m, n = A.shape

    # stores the num of singular values
    minDim = min([m, n])

    if maxiter is None:
        maxiter = minDim

    if x0 is None:
        dtype = result_type(A, b, float)
    else:
        dtype = result_type(A, b, x0, float)

    if show:
        print(' ')
        print('LSMR            Least-squares solution of  Ax = b\n')
        print(f'The matrix A has {m} rows and {n} columns')
        print('damp = %20.14e\n' % (damp))
        print('atol = %8.2e                 conlim = %8.2e\n' % (atol, conlim))
        print('btol = %8.2e             maxiter = %8g\n' % (btol, maxiter))

    u = b
    normb = norm(b)
    if x0 is None:
        x = zeros(n, dtype)
        beta = normb.copy()
    else:
        x = atleast_1d(x0.copy())
        u = u - A.matvec(x)
        beta = norm(u)

    if beta > 0:
        u = (1 / beta) * u
        v = A.rmatvec(u)
        alpha = norm(v)
    else:
        v = zeros(n, dtype)
        alpha = 0

    if alpha > 0:
        v = (1 / alpha) * v

    # Initialize variables for 1st iteration.
    itn = 0
    zetabar = alpha * beta
    alphabar = alpha
    rho = 1
    rhobar = 1
    cbar = 1
    sbar = 0

    h = v.copy()
    hbar = zeros(n, dtype)

    # Initialize variables for estimation of ||r||.

    betadd = beta
    betad = 0
    rhodold = 1
    tautildeold = 0
    thetatilde = 0
    zeta = 0
    d = 0

    # Initialize variables for estimation of ||A|| and cond(A)

    normA2 = alpha * alpha
    maxrbar = 0
    minrbar = 1e+100
    normA = sqrt(normA2)
    condA = 1
    normx = 0

    # Items for use in stopping rules, normb set earlier
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    normr = beta

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    normar = alpha * beta
    if normar == 0:
        if show:
            print(msg[0])
        return x, istop, itn, normr, normar, normA, condA, normx

    if normb == 0:
        x = b
        return x, istop, itn, normr, normar, normA, condA, normx

    if show:
        print(' ')
        print(hdg1, hdg2)
        test1 = 1
        test2 = alpha / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (normr, normar)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(''.join([str1, str2, str3]))

    # Main iteration loop.
    while itn < maxiter:
        itn = itn + 1

        # Perform the next step of the bidiagonalization to obtain the
        # next beta, u, alpha, v. These satisfy the relations
        #         beta*u = A@v - alpha*u,
        #        alpha*v = A'@u - beta*v.

        u *= -alpha
        u += A.matvec(v)
        beta = norm(u)

        if beta > 0:
            u *= (1 / beta)
            v *= -beta
            v += A.rmatvec(u)
            alpha = norm(v)
            if alpha > 0:
                v *= (1 / alpha)

        # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.

        # Construct rotation Qhat_{k,2k+1}.
        chat, shat, alphahat = _sym_ortho(alphabar, damp)

        # Use a plane rotation (Q_i) to turn B_i to R_i
        rhoold = rho
        c, s, rho = _sym_ortho(alphahat, beta)
        thetanew = s * alpha
        alphabar = c * alpha

        # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
        rhobarold = rhobar
        zetaold = zeta
        thetabar = sbar * rho
        rhotemp = cbar * rho
        cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
        zeta = cbar * zetabar
        zetabar = -sbar * zetabar

        # Update h, h_hat, x.
        hbar *= -(thetabar * rho / (rhoold * rhobarold))
        hbar += h
        x += (zeta / (rho * rhobar)) * hbar
        h *= -(thetanew / rho)
        h += v

        # Estimate of ||r||.

        # Apply rotation Qhat_{k,2k+1}.
        betaacute = chat * betadd
        betacheck = -shat * betadd

        # Apply rotation Q_{k,k+1}.
        betahat = c * betaacute
        betadd = -s * betaacute

        # Apply rotation Qtilde_{k-1}.
        # betad = betad_{k-1} here.

        thetatildeold = thetatilde
        ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
        thetatilde = stildeold * rhobar
        rhodold = ctildeold * rhobar
        betad = -stildeold * betad + ctildeold * betahat

        # betad   = betad_k here.
        # rhodold = rhod_k  here.

        tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
        taud = (zeta - thetatilde * tautildeold) / rhodold
        d = d + betacheck * betacheck
        normr = sqrt(d + (betad - taud)**2 + betadd * betadd)

        # Estimate ||A||.
        normA2 = normA2 + beta * beta
        normA = sqrt(normA2)
        normA2 = normA2 + alpha * alpha

        # Estimate cond(A).
        maxrbar = max(maxrbar, rhobarold)
        if itn > 1:
            minrbar = min(minrbar, rhobarold)
        condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)

        # Test for convergence.

        # Compute norms for convergence testing.
        normar = abs(zetabar)
        normx = norm(x)

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.

        test1 = normr / normb
        if (normA * normr) != 0:
            test2 = normar / (normA * normr)
        else:
            test2 = infty
        test3 = 1 / condA
        t1 = test1 / (1 + normA * normx / normb)
        rtol = btol + atol * normA * normx / normb

        # The following tests guard against extremely small values of
        # atol, btol or ctol.
        # (The user may have set any or all of the parameters
        # atol, btol, conlim to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.
        if itn >= maxiter:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.
        if show:
            if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
               (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
               (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
               (istop != 0):

                if pcount >= pfreq:
                    pcount = 0
                    print(' ')
                    print(hdg1, hdg2)
                pcount = pcount + 1
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (normr, normar)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (normA, condA)
                print(''.join([str1, str2, str3, str4]))

        if istop > 0:
            break

    # Print the stopping condition.
    if show:
        print(' ')
        print('LSMR finished')
        print(msg[istop])
        print('istop =%8g    normr =%8.1e' % (istop, normr))
        print('    normA =%8.1e    normAr =%8.1e' % (normA, normar))
        print('itn   =%8g    condA =%8.1e' % (itn, condA))
        print('    normx =%8.1e' % (normx))
        print(str1, str2)
        print(str3, str4)

    return x, istop, itn, normr, normar, normA, condA, normx
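# A minimal, self-contained sketch (added for illustration; the helper name
# `_demo_damped_lsmr` and its inputs are assumptions, not part of the solver)
# of the regularized problem described in the lsmr docstring above,
# min ||b - Ax||^2 + damp^2 ||x||^2: the damping penalty shrinks the
# solution toward zero relative to the undamped least-squares solution.
def _demo_damped_lsmr():
    import numpy as np

    A = np.array([[1., 0.], [1., 1.], [0., 1.]])
    b = np.array([1., 0.01, -1.])
    x_plain = lsmr(A, b)[0]             # ordinary least-squares solution
    x_damped = lsmr(A, b, damp=1.0)[0]  # damped solution has smaller norm
    assert np.linalg.norm(x_damped) < np.linalg.norm(x_plain)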
def test_ndim():
    X = np.array([[1]])
    A = interface.aslinearoperator(X)
    assert_equal(A.ndim, 2)
def assertCompatibleSystem(self, A, xtrue):
    Afun = aslinearoperator(A)
    b = Afun.matvec(xtrue)
    x = lsmr(A, b)[0]
    assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def setup_method(self):
    self.cases = []

    def make_cases(original, dtype):
        cases = []

        cases.append((matrix(original, dtype=dtype), original))
        cases.append((np.array(original, dtype=dtype), original))
        cases.append((sparse.csr_matrix(original, dtype=dtype), original))

        # Test default implementations of _adjoint and _rmatvec, which
        # refer to each other.
        def mv(x, dtype):
            y = original.dot(x)
            if len(x.shape) == 2:
                y = y.reshape(-1, 1)
            return y

        def rmv(x, dtype):
            return original.T.conj().dot(x)

        class BaseMatlike(interface.LinearOperator):
            args = ()

            def __init__(self, dtype):
                self.dtype = np.dtype(dtype)
                self.shape = original.shape

            def _matvec(self, x):
                return mv(x, self.dtype)

        class HasRmatvec(BaseMatlike):
            args = ()

            def _rmatvec(self, x):
                return rmv(x, self.dtype)

        class HasAdjoint(BaseMatlike):
            args = ()

            def _adjoint(self):
                shape = self.shape[1], self.shape[0]
                matvec = partial(rmv, dtype=self.dtype)
                rmatvec = partial(mv, dtype=self.dtype)
                return interface.LinearOperator(matvec=matvec,
                                                rmatvec=rmatvec,
                                                dtype=self.dtype,
                                                shape=shape)

        class HasRmatmat(HasRmatvec):
            def _matmat(self, x):
                return original.dot(x)

            def _rmatmat(self, x):
                return original.T.conj().dot(x)

        cases.append((HasRmatvec(dtype), original))
        cases.append((HasAdjoint(dtype), original))
        cases.append((HasRmatmat(dtype), original))
        return cases

    original = np.array([[1, 2, 3], [4, 5, 6]])
    self.cases += make_cases(original, np.int32)
    self.cases += make_cases(original, np.float32)
    self.cases += make_cases(original, np.float64)
    self.cases += [(interface.aslinearoperator(M).T, A.T)
                   for M, A in make_cases(original.T, np.float64)]
    self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                   for M, A in make_cases(original.T, np.float64)]

    original = np.array([[1, 2j, 3j], [4j, 5j, 6]])
    self.cases += make_cases(original, np.complex_)
    self.cases += [(interface.aslinearoperator(M).T, A.T)
                   for M, A in make_cases(original.T, np.complex_)]
    self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                   for M, A in make_cases(original.T, np.complex_)]
def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, str, None}
        initial guess to iterative method.
        ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.
        Default is `None`, which means using the zero initial guess.
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix, but got shape={A.shape}')

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N,)):
        raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
                         'incompatible')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        return x

    if hasattr(A, 'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    # process preconditioner
    if M is None:
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id  # module-level identity helper
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different '
                             'shapes')

    # set initial guess
    if x0 is None:
        x = zeros(N, dtype=xtype)
    elif isinstance(x0, str):
        if x0 == 'Mb':  # use nonzero initial guess ``M @ b``
            bCopy = b.copy()
            x = M.matvec(bCopy)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N,)):
            raise ValueError(f'shapes of A {A.shape} and '
                             f'x0 {x.shape} are incompatible')
        x = x.ravel()

    return A, M, x, b, postprocess
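# A small, self-contained sketch (the helper name `_demo_make_system` and its
# inputs are illustrative assumptions, not part of this module's API): it
# exercises make_system above with no preconditioner and ``x0='Mb'``, showing
# that M then defaults to the identity operator and the initial guess becomes
# ``M @ b``.
def _demo_make_system():
    import numpy as np

    A = np.array([[4., 1.], [1., 3.]])
    b = np.array([1., 2.])
    A_op, M, x, b_out, postprocess = make_system(A, None, 'Mb', b)
    assert np.allclose(x, M.matvec(b_out))  # initial guess is M @ b = b here
    assert np.allclose(postprocess(x), x)   # postprocess is a pass-through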
def _iv(A, k, ncv, tol, which, v0, maxiter,
        return_singular, solver, random_state):

    # input validation/standardization for `solver`
    # out of order because it's needed for other parameters
    solver = str(solver).lower()
    solvers = {"arpack", "lobpcg", "propack"}
    if solver not in solvers:
        raise ValueError(f"solver must be one of {solvers}.")

    # input validation/standardization for `A`
    A = aslinearoperator(A)  # this takes care of some input validation
    if not (np.issubdtype(A.dtype, np.complexfloating)
            or np.issubdtype(A.dtype, np.floating)):
        message = "`A` must be of floating or complex floating data type."
        raise ValueError(message)
    if np.prod(A.shape) == 0:
        message = "`A` must not be empty."
        raise ValueError(message)

    # input validation/standardization for `k`
    kmax = min(A.shape) if solver == 'propack' else min(A.shape) - 1
    if int(k) != k or not (0 < k <= kmax):
        message = "`k` must be an integer satisfying `0 < k < min(A.shape)`."
        raise ValueError(message)
    k = int(k)

    # input validation/standardization for `ncv`
    if solver == "arpack" and ncv is not None:
        if int(ncv) != ncv or not (k < ncv < min(A.shape)):
            message = ("`ncv` must be an integer satisfying "
                       "`k < ncv < min(A.shape)`.")
            raise ValueError(message)
        ncv = int(ncv)

    # input validation/standardization for `tol`
    if tol < 0 or not np.isfinite(tol):
        message = "`tol` must be a non-negative floating point value."
        raise ValueError(message)
    tol = float(tol)

    # input validation/standardization for `which`
    which = str(which).upper()
    whichs = {'LM', 'SM'}
    if which not in whichs:
        raise ValueError(f"`which` must be in {whichs}.")

    # input validation/standardization for `v0`
    if v0 is not None:
        v0 = np.atleast_1d(v0)
        if not (np.issubdtype(v0.dtype, np.complexfloating)
                or np.issubdtype(v0.dtype, np.floating)):
            message = ("`v0` must be of floating or complex floating "
                       "data type.")
            raise ValueError(message)

        shape = (A.shape[0],) if solver == 'propack' else (min(A.shape),)
        if v0.shape != shape:
            message = f"`v0` must have shape {shape}."
            raise ValueError(message)

    # input validation/standardization for `maxiter`
    if maxiter is not None and (int(maxiter) != maxiter or maxiter <= 0):
        message = "`maxiter` must be a positive integer."
        raise ValueError(message)
    maxiter = int(maxiter) if maxiter is not None else maxiter

    # input validation/standardization for `return_singular_vectors`
    # not going to be flexible with this; too complicated for little gain
    rs_options = {True, False, "vh", "u"}
    if return_singular not in rs_options:
        raise ValueError(f"`return_singular_vectors` must be in "
                         f"{rs_options}.")

    random_state = check_random_state(random_state)

    return (A, k, ncv, tol, which, v0, maxiter,
            return_singular, solver, random_state)
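# A brief sketch (the name `_demo_iv` and the chosen inputs are illustrative
# assumptions) of how _iv above normalizes its arguments: the matrix is
# wrapped in a LinearOperator, `k` is coerced to int, and an out-of-range `k`
# raises ValueError.
def _demo_iv():
    import numpy as np

    A = np.eye(5)
    args = _iv(A, 2.0, None, 0, 'LM', None, None, True, 'arpack', None)
    assert isinstance(args[1], int) and args[1] == 2  # k coerced to int

    try:
        _iv(A, 5, None, 0, 'LM', None, None, True, 'arpack', None)
    except ValueError:
        pass  # ARPACK requires 0 < k <= min(A.shape) - 1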
def lsqr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
         iter_lim=None, show=False, calc_var=False, x0=None):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.

    The function solves ``Ax = b``  or  ``min ||Ax - b||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x - x0||^2``.

    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    ::

      1. Unsymmetric equations --    solve  Ax = b

      2. Linear least squares  --    solve  Ax = b
                                     in the least-squares sense

      3. Damped least squares  --    solve  (   A    )*x = (    b    )
                                            ( damp*I )     ( damp*x0 )
                                     in the least-squares sense

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Representation of an m-by-n matrix.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : array_like, shape (m,)
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient. Default is 0.
    atol, btol : float, optional
        Stopping tolerances. `lsqr` continues iterations until a
        certain backward error estimate is smaller than some quantity
        depending on atol and btol.  Let ``r = b - Ax`` be the
        residual vector for the current approximate solution ``x``.
        If ``Ax = b`` seems to be consistent, `lsqr` terminates
        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
        Otherwise, `lsqr` terminates when ``norm(A^H r) <=
        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6
        (default), the final ``norm(r)`` should be accurate to about
        6 digits. (The final ``x`` will usually have fewer correct
        digits, depending on ``cond(A)`` and the size of LAMBDA.)
        If `atol` or `btol` is None, a default value of 1.0e-6 will
        be used.  Ideally, they should be estimates of the relative
        error in the entries of ``A`` and ``b`` respectively.  For
        example, if the entries of ``A`` have 7 correct digits, set
        ``atol = 1e-7``. This prevents the algorithm from doing
        unnecessary work beyond the uncertainty of the input data.
    conlim : float, optional
        Another stopping tolerance.  lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`.  For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say).  For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive. Default is 1e8.
    iter_lim : int, optional
        Explicit limitation on number of iterations (for safety).
    show : bool, optional
        Display an iteration log. Default is False.
    calc_var : bool, optional
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
    x0 : array_like, shape (n,), optional
        Initial guess of x, if None zeros are used. Default is None.

        .. versionadded:: 1.0.0

    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2 + damp^2 * norm(x - x0)^2 )``.  Equal to
        `r1norm` if ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'@r - damp^2*(x - x0))``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally
        ``(A'A + damp^2*I)^{-1}``.
        This is well defined if A has full column rank or ``damp > 0``.
        (Not sure what var means if ``rank(A) < n`` and ``damp = 0.``)

    Notes
    -----
    LSQR uses an iterative method to approximate the solution.  The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem.  Poor scaling of the rows
    or columns of A should therefore be avoided where possible.

    For example, in problem 1 the solution is unaltered by
    row-scaling.  If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A  b ) should be
    scaled up or down.

    In problems 1 and 2, the solution x is easily recovered
    following column-scaling.  Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).

    In problem 3, there is no freedom to re-scale if damp is
    nonzero.  However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.

    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large.  Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.

    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:

      1. Compute a residual vector ``r0 = b - A@x0``.
      2. Use LSQR to solve the system ``A@dx = r0``.
      3. Add the correction dx to obtain a final solution
         ``x = x0 + dx`` (a short sketch of this recipe follows the
         function below).

    This requires that ``x0`` be available before and after the call
    to LSQR.  To judge the benefits, suppose LSQR takes k1 iterations
    to solve A@x = b and k2 iterations to solve A@dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate.  The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A@x = b, the larger value
    btol*norm(b)/norm(r0) should be suitable for A@dx = r0.

    Preconditioning is another way to reduce the number of
    iterations.  If it is possible to solve a related system ``M@x =
    b`` efficiently, where M approximates A in some helpful way (e.g.
    M - A has low rank or its elements are small relative to those of
    A), LSQR may converge more rapidly on the system
    ``A@M(inverse)@z = b``, after which x can be recovered by solving
    M@x = z.

    If A is symmetric, LSQR should not be used!

    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ.  SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR.  If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration
    than SYMMLQ (but will take the same number of iterations).

    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583.  LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995).  "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lsqr
    >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)

    The first example has the trivial solution `[0, 0]`

    >>> b = np.array([0., 0., 0.], dtype=float)
    >>> x, istop, itn, normr = lsqr(A, b)[:4]
    >>> istop
    0
    >>> x
    array([ 0.,  0.])

    The stopping code `istop=0` returned indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains
    `[0., 0.]`. The next example has a non-trivial solution:

    >>> b = np.array([1., 0., -1.], dtype=float)
    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
    >>> istop
    1
    >>> x
    array([ 1., -1.])
    >>> itn
    1
    >>> r1norm
    4.440892098500627e-16

    As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
    limits. The given solution `[1., -1.]` obviously solves the equation. The
    remaining return values include information about the number of
    iterations (`itn=1`) and the remaining difference of left and right side
    of the solved equation.
    The final example demonstrates the behavior in the case where there is no
    solution for the equation:

    >>> b = np.array([1., 0.01, -1.], dtype=float)
    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
    >>> istop
    2
    >>> x
    array([ 1.00333333, -0.99666667])
    >>> A.dot(x)-b
    array([ 0.00333333, -0.00333333,  0.00333333])
    >>> r1norm
    0.005773502691896255

    `istop` indicates that the system is inconsistent and thus `x` is rather
    an approximate solution to the corresponding least-squares problem.
    `r1norm` contains the norm of the minimal residual that was found.
    """
    A = aslinearoperator(A)
    b = np.atleast_1d(b)
    if b.ndim > 1:
        b = b.squeeze()

    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    var = np.zeros(n)

    msg = ('The exact solution is  x = 0                              ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    if show:
        print(' ')
        print('LSQR            Least-squares solution of  Ax = b')
        str1 = f'The matrix A has {m} rows and {n} columns'
        str2 = 'damp = %20.14e   calc_var = %8g' % (damp, calc_var)
        str3 = 'atol = %8.2e                 conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e             iter_lim = %8g' % (btol, iter_lim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    itn = 0
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0

    # Set up the first vectors u and v for the bidiagonalization.
    # These satisfy  beta*u = b - A@x,  alfa*v = A'@u.
    u = b
    bnorm = np.linalg.norm(b)

    if x0 is None:
        x = np.zeros(n)
        beta = bnorm.copy()
    else:
        x = np.asarray(x0)
        u = u - A.matvec(x)
        beta = np.linalg.norm(u)

    if beta > 0:
        u = (1 / beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)
    else:
        v = x.copy()
        alfa = 0

    if alfa > 0:
        v = (1 / alfa) * v
    w = v.copy()

    rhobar = alfa
    phibar = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        if show:
            print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = '   Itn      x[0]       r1norm     r2norm '
    head2 = ' Compatible    LS      Norm A   Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)

    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        # Perform the next step of the bidiagonalization to obtain the
        # next beta, u, alfa, v. These satisfy the relations
        #     beta*u = A@v - alfa*u,
        #     alfa*v = A'@u - beta*v.
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)

        if beta > 0:
            u = (1 / beta) * u
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + dampsq)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v

        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        if damp > 0:
            rhobar1 = sqrt(rhobar**2 + dampsq)
            cs1 = rhobar / rhobar1
            sn1 = damp / rhobar1
            psi = sn1 * phibar
            phibar = cs1 * phibar
        else:
            # cs1 = 1 and sn1 = 0
            rhobar1 = rhobar
            psi = 0.

        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi

        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2

        if calc_var:
            var = var + dk**2

        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2

        # Test for convergence.
        # First, estimate the condition of the matrix Abar,
        # and the norms of rbar and Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)

        # Distinguish between
        #    r1norm = ||b - Ax|| and
        #    r2norm = rnorm in current code
        #           = sqrt(r1norm^2 + damp^2*||x - x0||^2).
        # Estimate r1norm from
        #    r1norm = sqrt(r2norm^2 - damp^2*||x - x0||^2).
        # Although there is cancellation, it might be accurate enough.
        if damp > 0:
            r1sq = rnorm**2 - dampsq * xxnorm
            r1norm = sqrt(abs(r1sq))
            if r1sq < 0:
                r1norm = -r1norm
        else:
            r1norm = rnorm
        r2norm = rnorm

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm

        # The following tests guard against extremely small values of
        # atol, btol or ctol.  (The user may have set any or all of
        # the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        if show:
            # See if it is time to print something.
            prnt = False
            if n <= 40:
                prnt = True
            if itn <= 10:
                prnt = True
            if itn >= iter_lim - 10:
                prnt = True
            # if itn%10 == 0: prnt = True
            if test3 <= 2 * ctol:
                prnt = True
            if test2 <= 10 * atol:
                prnt = True
            if test1 <= 10 * rtol:
                prnt = True
            if istop != 0:
                prnt = True

            if prnt:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)

        if istop != 0:
            break

    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g   r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e   arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn   =%8g   r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e   xnorm  =%8.1e' % (acond, xnorm)
        print(str1 + '   ' + str2)
        print(str3 + '   ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
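# A minimal sketch of the warm-start recipe from the lsqr Notes above (the
# helper name `_demo_lsqr_warm_start` and its inputs are illustrative
# assumptions, not part of the solver): with ``damp == 0``, solve
# ``A@dx = r0`` for the residual of an initial estimate x0, then form
# ``x = x0 + dx``.
def _demo_lsqr_warm_start():
    import numpy as np

    A = np.array([[1., 0.], [1., 1.], [0., 1.]])
    b = np.array([1., 0., -1.])
    x0 = np.array([0.9, -0.9])   # some initial estimate of the solution

    r0 = b - A.dot(x0)           # step 1: residual of the estimate
    dx = lsqr(A, r0)[0]          # step 2: solve A@dx = r0
    x = x0 + dx                  # step 3: corrected solution

    assert np.allclose(x, lsqr(A, b)[0])  # agrees with the direct solve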
def testComplexB(self):
    A = 4 * eye(self.n) + ones((self.n, self.n))
    xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
    b = aslinearoperator(A).matvec(xtrue)
    x = lsmr(A, b)[0]
    assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)