Example #1
def test_dtypes_of_operator_sum():
    # gh-6078

    mat_complex = np.random.rand(2, 2) + 1j * np.random.rand(2, 2)
    mat_real = np.random.rand(2, 2)

    complex_operator = interface.aslinearoperator(mat_complex)
    real_operator = interface.aslinearoperator(mat_real)

    sum_complex = complex_operator + complex_operator
    sum_real = real_operator + real_operator

    assert_equal(sum_real.dtype, np.float64)
    assert_equal(sum_complex.dtype, np.complex128)
Example #2
def test_dtypes_of_operator_sum():
    # gh-6078

    mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)
    mat_real = np.random.rand(2,2)

    complex_operator = interface.aslinearoperator(mat_complex)
    real_operator = interface.aslinearoperator(mat_real)

    sum_complex = complex_operator + complex_operator
    sum_real = real_operator + real_operator

    assert_equal(sum_real.dtype, np.float64)
    assert_equal(sum_complex.dtype, np.complex128)
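The two variants above pin down the same promotion rule. As a standalone illustration (a minimal sketch using the public scipy.sparse.linalg.aslinearoperator rather than the internal interface module the tests import), the dtype of an operator sum follows NumPy type promotion:

import numpy as np
from scipy.sparse.linalg import aslinearoperator

real_op = aslinearoperator(np.random.rand(2, 2))          # float64 operator
complex_op = aslinearoperator(np.random.rand(2, 2) + 0j)  # complex128 operator
print((real_op + real_op).dtype)      # float64: real + real stays real
print((real_op + complex_op).dtype)   # complex128: mixed sums promote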
Example #3
 def setUp(self):
     self.n = 10
     self.A = lowerBidiagonalMatrix(20,self.n)
     self.xtrue = transpose(arange(self.n,0,-1))
     self.Afun = aslinearoperator(self.A)
     self.b = self.Afun.matvec(self.xtrue)
     self.returnValues = lsmr(self.A,self.b)
Example #4
 def testComplexX0(self):
     A = 4 * eye(self.n) + ones((self.n, self.n))
     xtrue = transpose(arange(self.n, 0, -1))
     b = aslinearoperator(A).matvec(xtrue)
     x0 = zeros(self.n, dtype=complex)
     x = lsmr(A, b, x0=x0)[0]
     assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
Example #5
    def test_basic(self):

        for M in self.cases:
            A = interface.aslinearoperator(M)
            M, N = A.shape

            assert_equal(A.matvec(np.array([1, 2, 3])), [14, 32])
            assert_equal(A.matvec(np.array([[1], [2], [3]])), [[14], [32]])

            assert_equal(A * np.array([1, 2, 3]), [14, 32])
            assert_equal(A * np.array([[1], [2], [3]]), [[14], [32]])

            assert_equal(A.rmatvec(np.array([1, 2])), [9, 12, 15])
            assert_equal(A.rmatvec(np.array([[1], [2]])), [[9], [12], [15]])
            assert_equal(A.H.matvec(np.array([1, 2])), [9, 12, 15])
            assert_equal(A.H.matvec(np.array([[1], [2]])), [[9], [12], [15]])

            assert_equal(A.matmat(np.array([[1, 4], [2, 5], [3, 6]])),
                         [[14, 32], [32, 77]])

            assert_equal(A * np.array([[1, 4], [2, 5], [3, 6]]),
                         [[14, 32], [32, 77]])

            if hasattr(M, 'dtype'):
                assert_equal(A.dtype, M.dtype)
Example #6
 def testComplexX0(self):
     A = 4 * eye(self.n) + ones((self.n, self.n))
     xtrue = transpose(arange(self.n, 0, -1))
     b = aslinearoperator(A).matvec(xtrue)
     x0 = zeros(self.n, dtype=complex)
     x = lsmr(A, b, x0=x0)[0]
     assert_almost_equal(norm(x - xtrue), 0, decimal=5)
Example #7
    def test_basic(self):

        for M in self.cases:
            A = interface.aslinearoperator(M)
            M,N = A.shape

            assert_equal(A.matvec(np.array([1,2,3])), [14,32])
            assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])

            assert_equal(A * np.array([1,2,3]), [14,32])
            assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])

            assert_equal(A.rmatvec(np.array([1,2])), [9,12,15])
            assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]])
            assert_equal(A.H.matvec(np.array([1,2])), [9,12,15])
            assert_equal(A.H.matvec(np.array([[1],[2]])), [[9],[12],[15]])

            assert_equal(
                    A.matmat(np.array([[1,4],[2,5],[3,6]])),
                    [[14,32],[32,77]])

            assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]])

            if hasattr(M,'dtype'):
                assert_equal(A.dtype, M.dtype)
Example #8
    def test_basic(self):

        for M, A_array in self.cases:
            A = interface.aslinearoperator(M)
            M,N = A.shape

            xs = [np.array([1, 2, 3]),
                  np.array([[1], [2], [3]])]
            ys = [np.array([1, 2]), np.array([[1], [2]])]

            if A.dtype == np.complex128:
                xs += [np.array([1, 2j, 3j]),
                       np.array([[1], [2j], [3j]])]
                ys += [np.array([1, 2j]), np.array([[1], [2j]])]

            x2 = np.array([[1, 4], [2, 5], [3, 6]])

            for x in xs:
                assert_equal(A.matvec(x), A_array.dot(x))
                assert_equal(A * x, A_array.dot(x))

            assert_equal(A.matmat(x2), A_array.dot(x2))
            assert_equal(A * x2, A_array.dot(x2))

            for y in ys:
                assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))
                assert_equal(A.T.matvec(y), A_array.T.dot(y))
                assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))

            if hasattr(M,'dtype'):
                assert_equal(A.dtype, M.dtype)

            assert_(hasattr(A, 'args'))
Example #9
    def test(self):
        maxn = 15  # Dimension of square matrix to be solved
        # Use a PDP^-1 factorisation to construct a matrix with known
        # eigenvalues/vectors. Uses random eigenvectors initially.
        P = np.mat(np.random.random((maxn, ) * 2))
        P /= list(map(np.linalg.norm, P.T))  # Normalise the eigenvectors
        D = np.mat(np.zeros((maxn, ) * 2))
        D[range(maxn),
          range(maxn)] = (np.arange(maxn, dtype=float) + 1) / np.sqrt(maxn)
        A = P * D * np.linalg.inv(P)
        vals = np.array(D.diagonal())[0]
        vecs = P
        uv_sortind = vals.argsort()
        vals = vals[uv_sortind]
        vecs = vecs[:, uv_sortind]

        A = aslinearoperator(A)
        matvec = A.matvec
        #= lambda x: np.asarray(A*x)[0]
        nev = 4
        eigvs = ARPACK_eigs(matvec, A.shape[0], nev=nev)
        calc_vals = eigvs[0]
        # Ensure the calculated eigenvectors have the same sign as the reference values
        calc_vecs = eigvs[1] / [np.sign(x[0]) for x in eigvs[1].T]
        assert_array_almost_equal(calc_vals, vals[0:nev], decimal=7)
        assert_array_almost_equal(calc_vecs,
                                  np.array(vecs)[:, 0:nev],
                                  decimal=7)
Example #10
    def test(self):
        maxn=15                # Dimension of square matrix to be solved
        # Use a PDP^-1 factorisation to construct a matrix with known
        # eigenvalues/vectors. Uses random eigenvectors initially.
        P = np.mat(np.random.random((maxn,)*2))
        P /= list(map(np.linalg.norm, P.T))      # Normalise the eigenvectors
        D = np.mat(np.zeros((maxn,)*2))
        D[range(maxn), range(maxn)] = (np.arange(maxn, dtype=float)+1)/np.sqrt(maxn)
        A = P*D*np.linalg.inv(P)
        vals = np.array(D.diagonal())[0]
        vecs = P
        uv_sortind = vals.argsort()
        vals = vals[uv_sortind]
        vecs = vecs[:,uv_sortind]

        A=aslinearoperator(A)
        matvec = A.matvec
        #= lambda x: np.asarray(A*x)[0]
        nev=4
        eigvs = ARPACK_eigs(matvec, A.shape[0], nev=nev)
        calc_vals = eigvs[0]
        # Ensure the calculated eigenvectors have the same sign as the reference values
        calc_vecs = eigvs[1] / [np.sign(x[0]) for x in eigvs[1].T]
        assert_array_almost_equal(calc_vals, vals[0:nev], decimal=7)
        assert_array_almost_equal(calc_vecs,  np.array(vecs)[:,0:nev], decimal=7)
Example #11
 def setup_method(self):
     self.n = 10
     self.A = lowerBidiagonalMatrix(20, self.n)
     self.xtrue = transpose(arange(self.n, 0, -1))
     self.Afun = aslinearoperator(self.A)
     self.b = self.Afun.matvec(self.xtrue)
     self.x0 = ones(self.n)
     self.x00 = self.x0.copy()
     self.returnValues = lsmr(self.A, self.b)
     self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)
Example #12
    def test_dot(self):

        for M in self.cases:
            A = interface.aslinearoperator(M)
            M, N = A.shape

            assert_equal(A.dot(np.array([1, 2, 3])), [14, 32])
            assert_equal(A.dot(np.array([[1], [2], [3]])), [[14], [32]])

            assert_equal(A.dot(np.array([[1, 4], [2, 5], [3, 6]])), [[14, 32], [32, 77]])
Example #13
def test_adjoint_conjugate():
    X = np.array([[1j]])
    A = interface.aslinearoperator(X)

    B = 1j * A
    Y = 1j * X

    v = np.array([1])

    assert_equal(B.dot(v), Y.dot(v))
    assert_equal(B.H.dot(v), Y.T.conj().dot(v))
Example #14
    def test_dot(self):

        for M in self.cases:
            A = interface.aslinearoperator(M)
            M, N = A.shape

            assert_equal(A.dot(np.array([1, 2, 3])), [14, 32])
            assert_equal(A.dot(np.array([[1], [2], [3]])), [[14], [32]])

            assert_equal(A.dot(np.array([[1, 4], [2, 5], [3, 6]])),
                         [[14, 32], [32, 77]])
Example #15
def test_transpose_noconjugate():
    X = np.array([[1j]])
    A = interface.aslinearoperator(X)

    B = 1j * A
    Y = 1j * X

    v = np.array([1])

    assert_equal(B.dot(v), Y.dot(v))
    assert_equal(B.T.dot(v), Y.T.dot(v))
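The transpose/adjoint distinction exercised here (and in the basic tests above) fits in a few lines; a minimal sketch with the public API:

import numpy as np
from scipy.sparse.linalg import aslinearoperator

A = aslinearoperator(np.array([[1j]]))
v = np.array([1])
print(A.T.matvec(v))   # [0.+1.j]  .T transposes without conjugating
print(A.H.matvec(v))   # [0.-1.j]  .H is the adjoint (conjugate transpose)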
Example #16
    def test_dot(self):

        for M, A_array in self.cases:
            A = interface.aslinearoperator(M)
            M, N = A.shape

            x0 = np.array([1, 2, 3])
            x1 = np.array([[1], [2], [3]])
            x2 = np.array([[1, 4], [2, 5], [3, 6]])

            assert_equal(A.dot(x0), A_array.dot(x0))
            assert_equal(A.dot(x1), A_array.dot(x1))
            assert_equal(A.dot(x2), A_array.dot(x2))
Example #17
def test_attributes():
    A = interface.aslinearoperator(np.arange(16).reshape(4, 4))

    def always_four_ones(x):
        x = np.asarray(x)
        assert_(x.shape == (3,) or x.shape == (3, 1))
        return np.ones(4)

    B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)

    for op in [A, B, A * B, A.H, A + A, B + B, A ** 4]:
        assert_(hasattr(op, "dtype"))
        assert_(hasattr(op, "shape"))
        assert_(hasattr(op, "_matvec"))
Example #18
def test_attributes():
    A = interface.aslinearoperator(np.arange(16).reshape(4, 4))

    def always_four_ones(x):
        x = np.asarray(x)
        assert_(x.shape == (3, ) or x.shape == (3, 1))
        return np.ones(4)

    B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)

    for op in [A, B, A * B, A.H, A + A, B + B, A**4]:
        assert_(hasattr(op, "dtype"))
        assert_(hasattr(op, "shape"))
        assert_(hasattr(op, "_matvec"))
Example #19
def arpack_eigs(mtx, nev=1, which='SM'):
    """
    Calculate several eigenvalues and corresponding eigenvectors of a
    matrix using ARPACK from SciPy. The eigenvalues are sorted in
    ascending order.
    """
    from scipy.sparse.linalg.interface import aslinearoperator
    from scipy.sparse.linalg.eigen.arpack import speigs
    import numpy as nm

    matvec = aslinearoperator(mtx).matvec
    eigs, vecs = speigs.ARPACK_eigs(matvec, mtx.shape[0], nev=nev, which=which)

    ii = nm.argsort(eigs)
    eigs = eigs[ii]
    vecs = vecs[:,ii]

    return eigs, vecs
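arpack_eigs wraps speigs.ARPACK_eigs, an internal module that has since been removed from SciPy. For orientation only, roughly the same computation with the current public API would be sketched as:

import numpy as np
from scipy.sparse.linalg import eigsh

mtx = np.diag(np.arange(1.0, 11.0))
eigs, vecs = eigsh(mtx, k=3, which='SM')  # a few smallest-magnitude eigenvalues
order = np.argsort(eigs)                  # sort ascending, as arpack_eigs does
eigs, vecs = eigs[order], vecs[:, order]
print(eigs)                               # approximately [1. 2. 3.]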
Example #20
def lsmrtest(m, n, damp):
    """Verbose testing of lsmr"""

    A = lowerBidiagonalMatrix(m,n)
    xtrue = arange(n,0,-1, dtype=float)
    Afun = aslinearoperator(A)

    b = Afun.matvec(xtrue)

    atol = 1.0e-7
    btol = 1.0e-7
    conlim = 1.0e+10
    itnlim = 10*n
    show = 1

    x, istop, itn, normr, normar, norma, conda, normx \
      = lsmr(A, b, damp, atol, btol, conlim, itnlim, show)

    j1 = min(n, 5)
    j2 = max(n - 4, 1)
    print(' ')
    print('First elements of x:')
    print(''.join('%10.4f' % xi for xi in x[0:j1]))
    print(' ')
    print('Last  elements of x:')
    print(''.join('%10.4f' % xi for xi in x[j2-1:]))

    r = b - Afun.matvec(x)
    r2 = sqrt(norm(r)**2 + (damp*norm(x))**2)
    print(' ')
    print('normr (est.)  %17.10e' % normr)
    print('normr (true)  %17.10e' % r2)
    print(' ')
Example #21
def svd(A, k=6):
    """Compute a few singular values/vectors for a sparse matrix using ARPACK.

    Parameters
    ----------
    A: sparse matrix
        Array to compute the SVD on.
    k: int
        Number of singular values and vectors to compute.

    Note
    ----
    This is a naive implementation using the symmetric eigensolver on A.T * A
    or A * A.T, depending on which one is more efficient.

    Complex support is not implemented yet
    """
    # TODO: implement complex support once ARPACK-based eigen_hermitian is
    # available
    n, m = A.shape

    if np.iscomplexobj(A):
        # complex support would use op = lambda x: x.T.conjugate(), but it
        # needs the ARPACK-based Hermitian eigensolver, which is unavailable
        raise NotImplementedError("Complex support for sparse SVD not "
                                  "implemented yet")

    op = lambda x: x.T

    tp = A.dtype.char
    linear_at = aslinearoperator(op(A))
    linear_a = aslinearoperator(A)

    def _left(x, sz):
        x = csc_matrix(x)

        matvec = lambda x: linear_at.matvec(linear_a.matvec(x))
        params = _SymmetricArpackParams(sz, k, tp, matvec)

        while not params.converged:
            params.iterate()
        eigvals, eigvec = params.extract(True)
        s = np.sqrt(eigvals)

        v = eigvec
        u = (x * v) / s
        return u, s, op(v)

    def _right(x, sz):
        x = csr_matrix(x)

        matvec = lambda x: linear_a.matvec(linear_at.matvec(x))
        params = _SymmetricArpackParams(sz, k, tp, matvec)

        while not params.converged:
            params.iterate()
        eigvals, eigvec = params.extract(True)

        s = np.sqrt(eigvals)

        u = eigvec
        vh = (op(u) * x) / s[:, None]
        return u, s, vh

    if n > m:
        return _left(A, m)
    else:
        return _right(A, n)
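The docstring's note (symmetric eigensolver on A.T * A or A * A.T) is the classical normal-equations trick. A sketch of the same idea with today's public API, under the caveat that squaring the operator also squares its condition number; the names here are illustrative:

import numpy as np
from scipy.sparse.linalg import LinearOperator, aslinearoperator, eigsh

A = np.random.rand(8, 5)
Aop = aslinearoperator(A)

# Gram operator x -> A.T @ (A @ x); its eigenvalues are squared singular values
gram = LinearOperator((5, 5), matvec=lambda x: Aop.rmatvec(Aop.matvec(x)),
                      dtype=A.dtype)
w, _ = eigsh(gram, k=3, which='LM')
print(np.sort(np.sqrt(w))[::-1])   # three largest singular values of A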
Example #22
 def assertCompatibleSystem(self, A, xtrue):
     Afun = aslinearoperator(A)
     b = Afun.matvec(xtrue)
     x = lsmr(A, b)[0]
     assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
Example #23
def eigsh(A,
          k=6,
          M=None,
          sigma=None,
          which='LM',
          v0=None,
          ncv=None,
          maxiter=None,
          tol=0,
          return_eigenvectors=True,
          Minv=None,
          OPinv=None,
          mode='normal',
          lock=None,
          return_stats=False,
          maxBlockSize=0,
          minRestartSize=0,
          maxPrevRetain=0,
          method=None):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex Hermitian matrix A.

    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i].

    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation A * x, where A is a real symmetric matrix or complex
        Hermitian.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.
    M : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation M * x for the generalized eigenvalue problem

            A * x = w * M * x.

        M must represent a real, symmetric matrix if A is real, and must
        represent a complex, hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
    sigma : real, optional
        Find eigenvalues near sigma.
    v0 : N x i, ndarray, optional
        Starting vectors for iteration.
    ncv : int, optional
        The maximum size of the basis
    which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : Largest (in magnitude) eigenvalues

            'SM' : Smallest (in magnitude) eigenvalues

            'LA' : Largest (algebraic) eigenvalues

            'SA' : Smallest (algebraic) eigenvalues

            'BE' : Half (k/2) from each end of the spectrum (not supported)

        When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
    maxiter : int, optional
        Maximum number of iterations.
    tol : float
        Accuracy for eigenvalues (stopping criterion).
        The default value is sqrt of machine precision.
    Minv : (not supported)
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        Preconditioner to accelerate the convergence. Usually it is an
        approximation of the inverse of (A - sigma*M).
    return_eigenvectors : bool
        Return eigenvectors (True) in addition to eigenvalues
    mode : string ['normal' | 'buckling' | 'cayley']
        Only 'normal' mode is supported.
    lock : N x i, ndarray, optional
        Seek the eigenvectors orthogonal to these ones. The provided
        vectors *should* be orthonormal. Useful to avoid converging to
        already computed solutions.
    maxBlockSize : int, optional
        Maximum number of vectors added at every iteration.
    minRestartSize : int, optional
        Number of approximate eigenvectors kept from last iteration in restart.
    maxPrevRetain: int, optional
        Number of approximate eigenvectors kept from previous iteration in
        restart. Also referred to as +k vectors in GD+k.
    method : int, optional
        Preset method, one of:

        - DEFAULT_MIN_TIME : a variant of JDQMR,
        - DEFAULT_MIN_MATVECS : GD+k
        - DYNAMIC : choose dynamically between both previous methods.

        See a detailed description of the methods and other possible values
        in [2]_.
        
    return_stats : bool, optional
        If True, extra information from PRIMME is also returned.

    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array representing the `k` eigenvectors.  The column ``v[:, i]`` is
        the eigenvector corresponding to the eigenvalue ``w[i]``.
    stats : dict, optional (if return_stats)
        Extra information reported by PRIMME:

        - "numOuterIterations": number of outer iterations
        - "numRestarts": number of restarts
        - "numMatvecs": number of A*v
        - "numPreconds": number of OPinv*v
        - "elapsedTime": time that took 
        - "estimateMinEVal": the leftmost Ritz value seen
        - "estimateMaxEVal": the rightmost Ritz value seen
        - "estimateLargestSVal": the largest singular value seen

    Raises
    ------
    PrimmeError
        When the requested convergence is not obtained.

        The PRIMME error code can be found as ``err`` attribute of the exception
        object.

    See Also
    --------
    scipy.sparse.linalg.eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    Primme.svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to PRIMME functions to find the eigenvalues and
    eigenvectors [1]_.

    References
    ----------
    .. [1] PRIMME Software, https://github.com/primme/primme
    .. [2] Preset Methods, http://www.cs.wm.edu/~andreas/software/doc/readme.html#preset-methods

    Examples
    --------
    >>> import Primme, scipy.sparse
    >>> A = scipy.sparse.spdiags(range(100), [0], 100, 100) # sparse diag. matrix
    >>> evals, evecs = Primme.eigsh(A, 3, tol=1e-6, which='LA')
    >>> evals # the three largest eigenvalues of A
    array([ 99.,  98.,  97.])
    >>> evals, evecs = Primme.eigsh(A, 3, tol=1e-6, which='LA', lock=evecs)
    >>> evals # the next three largest eigenvalues
    array([ 96.,  95.,  94.])
    """

    A = aslinearoperator(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('A: expected square matrix (shape=%s)' % (A.shape, ))

    if M is not None:
        raise ValueError('generalized problems (M != None) are not supported')

    class PP(PrimmeParams):
        def __init__(self):
            PrimmeParams.__init__(self)

        def matvec(self, X):
            return A.matmat(X)

        def prevec(self, X):
            return OPinv.matmat(X)

    pp = PP()

    pp.n = A.shape[0]

    if k <= 0 or k > pp.n:
        raise ValueError("k=%d must be between 1 and %d, the order of the "
                         "square input matrix." % (k, pp.n))
    pp.numEvals = k
    pp.correctionParams.precondition = 0 if OPinv is None else 1

    if which == 'LM':
        pp.target = primme_largest_abs
        if sigma is None:
            sigma = 0.0
    elif which == 'LA':
        pp.target = primme_largest
        sigma = None
    elif which == 'SA':
        pp.target = primme_smallest
        sigma = None
    elif which == 'SM':
        pp.target = primme_closest_abs
        if sigma is None:
            sigma = 0.0
    else:
        raise ValueError("which='%s' not supported" % which)

    if sigma is not None:
        pp.targetShifts = np.array([sigma], dtype=np.dtype('d'))

    pp.eps = tol

    if ncv is not None:
        pp.maxBasisSize = ncv

    if maxiter is not None:
        pp.maxMatvecs = maxiter

    if OPinv is not None:
        OPinv = aslinearoperator(OPinv)
        if OPinv.shape[0] != OPinv.shape[1] or OPinv.shape[0] != A.shape[0]:
            raise ValueError(
                'OPinv: expected square matrix with same shape as A (shape=%s)'
                % (OPinv.shape, ))
        pp.correctionParams.precondition = 1

    if lock is not None:
        if lock.shape[0] != pp.n:
            raise ValueError(
                'lock: expected matrix with the same columns as A (shape=%s)' %
                (lock.shape, ))
        pp.numOrthoConst = min(lock.shape[1], pp.n)

    if A.dtype.kind in frozenset(["b", "i", "u", "f"]):
        dtype = np.dtype("d")
    else:
        dtype = np.dtype("complex128")

    evals = np.zeros(pp.numEvals)
    norms = np.zeros(pp.numEvals)
    evecs = np.zeros((pp.n, pp.numOrthoConst + pp.numEvals), dtype, order='F')

    if lock is not None:
        np.copyto(evecs[:, 0:pp.numOrthoConst], lock[:, 0:pp.numOrthoConst])

    if v0 is not None:
        pp.initSize = min(v0.shape[1], pp.numEvals)
        np.copyto(evecs[:, pp.numOrthoConst:pp.numOrthoConst + pp.initSize],
                  v0[:, 0:pp.initSize])

    if maxBlockSize:
        pp.maxBlockSize = maxBlockSize

    if minRestartSize:
        pp.minRestartSize = minRestartSize

    if maxPrevRetain:
        pp.restartingParams.maxPrevRetain = maxPrevRetain

    if method is not None:
        pp.set_method(method)

    if dtype is np.dtype(np.complex128):
        err = zprimme(evals, evecs, norms, pp)
    else:
        err = dprimme(evals, evecs, norms, pp)

    if err != 0:
        raise PrimmeError(err)

    evecs = evecs[:, pp.numOrthoConst:]
    if return_stats:
        stats = dict((f, getattr(pp.stats, f)) for f in [
            "numOuterIterations", "numRestarts", "numMatvecs", "numPreconds",
            "elapsedTime", "estimateMinEVal", "estimateMaxEVal",
            "estimateLargestSVal"
        ])
        return evals, evecs, stats
    else:
        return evals, evecs
Example #24
def test_ndim():
    X = np.array([[1]])
    A = interface.aslinearoperator(X)
    assert_equal(A.ndim, 2)
Example #25
def eigen(A, k=6, M=None, sigma=None, which='LM', v0=None,
          ncv=None, maxiter=None, tol=0,
          return_eigenvectors=True):
    """Find k eigenvalues and eigenvectors of the square matrix A.

    Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].


    Parameters
    ----------
    A : matrix, array, or object with matvec(x) method
        An N x N matrix, array, or an object with matvec(x) method to perform
        the matrix vector product A * x.  The sparse matrix formats
        in scipy.sparse are appropriate for A.

    k : integer
        The number of eigenvalues and eigenvectors desired

    Returns
    -------
    w : array
        Array of k eigenvalues

    v : array
        An array of k eigenvectors.
        v[i] is the eigenvector corresponding to the eigenvalue w[i].

    Other Parameters
    ----------------

    M : matrix or array
        (Not implemented)
        A symmetric positive-definite matrix for the generalized
        eigenvalue problem A * x = w * M * x

    sigma : real or complex
        (Not implemented)
        Find eigenvalues near sigma.  Shift spectrum by sigma.

    v0 : array
        Starting vector for iteration.

    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k; it is recommended that ncv > 2*k

    which : string
        Which k eigenvectors and eigenvalues to find:
         - 'LM' : largest magnitude
         - 'SM' : smallest magnitude
         - 'LR' : largest real part
         - 'SR' : smallest real part
         - 'LI' : largest imaginary part
         - 'SI' : smallest imaginary part

    maxiter : integer
        Maximum number of Arnoldi update iterations allowed

    tol : float
        Relative accuracy for eigenvalues (stopping criterion)

    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues

    See Also
    --------
    eigen_symmetric : eigenvalues and eigenvectors for symmetric matrix A

    Notes
    -----

    Examples
    --------

    """
    A = aslinearoperator(A)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    n = A.shape[0]

    # guess type
    typ = A.dtype.char
    if typ not in 'fdFD':
        raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")

    if M is not None:
        raise NotImplementedError("generalized eigenproblem not supported yet")
    if sigma is not None:
        raise NotImplementedError("shifted eigenproblem not supported yet")


    # some defaults
    if ncv is None:
        ncv = 2*k + 1
    ncv = min(ncv, n)
    if maxiter is None:
        maxiter = n*10
    # assign starting vector
    if v0 is not None:
        resid = v0
        info = 1
    else:
        resid = np.zeros(n, typ)
        info = 0


    # some sanity checks
    if k <= 0:
        raise ValueError("k must be positive, k=%d"%k)
    if k == n:
        raise ValueError("k must be less than rank(A), k=%d"%k)
    if maxiter <= 0:
        raise ValueError("maxiter must be positive, maxiter=%d"%maxiter)
    whiches=['LM','SM','LR','SR','LI','SI']
    if which not in whiches:
        raise ValueError("which must be one of %s"%' '.join(whiches))
    if ncv > n or ncv < k:
        raise ValueError("ncv must be k<=ncv<=n, ncv=%s"%ncv)

    # assign solver and postprocessor
    ltr = _type_conv[typ]
    eigsolver = _arpack.__dict__[ltr+'naupd']
    eigextract = _arpack.__dict__[ltr+'neupd']

    v = np.zeros((n,ncv),typ) # holds Ritz vectors
    workd = np.zeros(3*n,typ) # workspace
    workl = np.zeros(3*ncv*ncv+6*ncv,typ) # workspace
    iparam = np.zeros(11,'int') # problem parameters
    ipntr = np.zeros(14,'int') # pointers into workspaces
    ido = 0

    if typ in 'FD':
        rwork = np.zeros(ncv,typ.lower())

    # set solver mode and parameters
    # only supported mode is 1: Ax=lx
    ishfts = 1
    mode1 = 1
    bmat = 'I'
    iparam[0] = ishfts
    iparam[2] = maxiter
    iparam[6] = mode1

    while True:
        if typ in 'fd':
            ido,resid,v,iparam,ipntr,info =\
                eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr,
                          workd,workl,info)
        else:
            ido,resid,v,iparam,ipntr,info =\
                eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr,
                          workd,workl,rwork,info)

        xslice = slice(ipntr[0]-1, ipntr[0]-1+n)
        yslice = slice(ipntr[1]-1, ipntr[1]-1+n)
        if ido == -1:
            # initialization
            workd[yslice]=A.matvec(workd[xslice])
        elif ido == 1:
            # compute y=Ax
            workd[yslice]=A.matvec(workd[xslice])
        else:
            break

    if info < -1:
        raise RuntimeError("Error info=%d in arpack" % info)
    if info == -1:
        warnings.warn("Maximum number of iterations taken: %s"%iparam[2])
#    if iparam[3] != k:
#        warnings.warn("Only %s eigenvalues converged"%iparam[3])


    # now extract eigenvalues and (optionally) eigenvectors
    rvec = return_eigenvectors
    ierr = 0
    howmny = 'A' # return all eigenvectors
    sselect = np.zeros(ncv,'int') # unused
    sigmai = 0.0 # no shifts, not implemented
    sigmar = 0.0 # no shifts, not implemented
    workev = np.zeros(3*ncv,typ)

    if typ in 'fd':
        dr=np.zeros(k+1,typ)
        di=np.zeros(k+1,typ)
        zr=np.zeros((n,k+1),typ)
        dr,di,zr,info=\
            eigextract(rvec,howmny,sselect,sigmar,sigmai,workev,
                   bmat,which,k,tol,resid,v,iparam,ipntr,
                   workd,workl,info)

        # The ARPACK nonsymmetric real and double interface (s,d)naupd return
        # eigenvalues and eigenvectors in real (float,double) arrays.

        # Build complex eigenvalues from real and imaginary parts
        d=dr+1.0j*di

        # Arrange the eigenvectors: complex eigenvectors are stored as
        # real,imaginary in consecutive columns
        z=zr.astype(typ.upper())
        eps=np.finfo(typ).eps
        i=0
        while i<=k:
            # check if complex
            if abs(d[i].imag)>eps:
                # assume this is a complex conjugate pair with eigenvalues
                # in consecutive columns
                z[:,i]=zr[:,i]+1.0j*zr[:,i+1]
                z[:,i+1]=z[:,i].conjugate()
                i+=1
            i+=1

        # Now we have k+1 possible eigenvalues and eigenvectors
        # Return the ones specified by the keyword "which"
        nreturned=iparam[4] # number of good eigenvalues returned
        if nreturned==k:    # we got exactly how many eigenvalues we wanted
            d=d[:k]
            z=z[:,:k]
        else:   # we got one extra eigenvalue (likely a cc pair, but which?)
            # cut at approx precision for sorting
            rd=np.round(d,decimals=_ndigits[typ])
            if which in ['LR','SR']:
                ind=np.argsort(rd.real)
            elif which in ['LI','SI']:
                # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
                ind=np.argsort(abs(rd.imag))
            else:
                ind=np.argsort(abs(rd))
            if which in ['LR','LM','LI']:
                d=d[ind[-k:]]
                z=z[:,ind[-k:]]
            if which in ['SR','SM','SI']:
                d=d[ind[:k]]
                z=z[:,ind[:k]]


    else:
        # complex is so much simpler...
        d,z,info =\
              eigextract(rvec,howmny,sselect,sigmar,workev,
                         bmat,which,k,tol,resid,v,iparam,ipntr,
                         workd,workl,rwork,ierr)



    if ierr != 0:
        raise RuntimeError("Error info=%d in arpack" % info)
    if return_eigenvectors:
        return d,z
    return d
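The reverse-communication loop above (the ido codes and the workd/workl workspaces) is exactly what the modern public wrapper hides. For orientation, a sketch of the equivalent call today:

import numpy as np
from scipy.sparse.linalg import eigs

A = np.diag(np.arange(1.0, 101.0))
w, v = eigs(A, k=6, which='LM')   # same defaults as eigen() above
print(np.sort(w.real)[::-1])      # approximately [100. 99. 98. 97. 96. 95.]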
Example #26
def eigen_symmetric(A, k=6, M=None, sigma=None, which='LM', v0=None,
                    ncv=None, maxiter=None, tol=0,
                    return_eigenvectors=True):
    """Find k eigenvalues and eigenvectors of the real symmetric
    square matrix A.

    Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].


    Parameters
    ----------
    A : matrix or array with real entries or object with matvec(x) method
        An N x N real symmetric matrix or array or an object with matvec(x)
        method to perform the matrix vector product A * x.  The sparse
        matrix formats in scipy.sparse are appropriate for A.

    k : integer
        The number of eigenvalues and eigenvectors desired

    Returns
    -------
    w : array
        Array of k eigenvalues

    v : array
       An array of k eigenvectors.
       v[i] is the eigenvector corresponding to the eigenvalue w[i].

    Other Parameters
    ----------------
    M : matrix or array
        (Not implemented)
        A symmetric positive-definite matrix for the generalized
        eigenvalue problem A * x = w * M * x


    sigma : real
        (Not implemented)
        Find eigenvalues near sigma.  Shift spectrum by sigma.

    v0 : array
        Starting vector for iteration.

    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k; it is recommended that ncv > 2*k

    which : string
        Which k eigenvectors and eigenvalues to find:
         - 'LA' : Largest (algebraic) eigenvalues
         - 'SA' : Smallest (algebraic) eigenvalues
         - 'LM' : Largest (in magnitude) eigenvalues
         - 'SM' : Smallest (in magnitude) eigenvalues
         - 'BE' : Half (k/2) from each end of the spectrum
                  When k is odd, return one more (k/2+1) from the high end

    maxiter : integer
        Maximum number of Arnoldi update iterations allowed

    tol : float
        Relative accuracy for eigenvalues (stopping criterion)

    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues

    See Also
    --------
    eigen : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A

    Notes
    -----

    Examples
    --------
    """
    A = aslinearoperator(A)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    n = A.shape[0]

    if M is not None:
        raise NotImplementedError("generalized eigenproblem not supported yet")

    matvec = lambda x : A.matvec(x)
    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, sigma,
                           ncv, v0, maxiter, which, tol)

    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
Example #27
def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, None}
        initial guess to iterative method
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N,1) or b.shape == (N,)):
        raise ValueError('A and b have incompatible dimensions')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b,matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if hasattr(A,'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)  # module helper: common 'f'/'d'/'F'/'D' char

    b = asarray(b,dtype=xtype)  # make b the same type as x
    b = b.ravel()

    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N,1) or x.shape == (N,)):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()

    # process preconditioner
    # note: `id` below is the module's identity helper (def id(x): return x),
    # shadowing the builtin; it marks "no preconditioning" on that side
    if M is None:
        if hasattr(A_,'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_,'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    return A, M, x, b, postprocess
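A sketch of the right-hand-side and initial-guess normalization make_system performs, using only public pieces (id and coerce above are module-internal helpers):

import numpy as np
from scipy.sparse.linalg import aslinearoperator

A = aslinearoperator(np.eye(3))
b = np.array([[1], [2], [3]])             # an (N, 1) right-hand side is accepted
b = np.asarray(b, dtype='d').ravel()      # upcast to float, flattened to rank 1
x = np.zeros(A.shape[0], dtype=b.dtype)   # default initial guess when x0 is None
print(b.shape, x.shape, b.dtype)          # (3,) (3,) float64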
Example #28
def lsqr(A,
         b,
         damp=0.0,
         atol=1e-8,
         btol=1e-8,
         conlim=1e8,
         iter_lim=None,
         show=False,
         calc_var=False,
         x0=None):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.

    The function solves ``Ax = b``  or  ``min ||b - Ax||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x||^2``.

    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    ::

      1. Unsymmetric equations --    solve  A*x = b

      2. Linear least squares  --    solve  A*x = b
                                     in the least-squares sense

      3. Damped least squares  --    solve  (   A    )*x = ( b )
                                            ( damp*I )     ( 0 )
                                     in the least-squares sense

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Representation of an m-by-n matrix.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : array_like, shape (m,)
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient.
    atol, btol : float, optional
        Stopping tolerances. If both are 1.0e-9 (say), the final
        residual norm should be accurate to about 9 digits.  (The
        final x will usually have fewer correct digits, depending on
        cond(A) and the size of damp.)
    conlim : float, optional
        Another stopping tolerance.  lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`.  For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say).  For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive.
    iter_lim : int, optional
        Explicit limitation on number of iterations (for safety).
    show : bool, optional
        Display an iteration log.
    calc_var : bool, optional
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
    x0 : array_like, shape (n,), optional
        Initial guess of x, if None zeros are used.

        .. versionadded:: 1.0.0

    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2  +  damp^2 * norm(x)^2 )``.  Equal to `r1norm` if
        ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'*r - damp^2*x)``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
        damp^2*I)^{-1}``.  This is well defined if A has full column
        rank or ``damp > 0``.  (Not sure what var means if ``rank(A)
        < n`` and ``damp = 0.``)

    Notes
    -----
    LSQR uses an iterative method to approximate the solution.  The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem.  Poor scaling of the rows
    or columns of A should therefore be avoided where possible.

    For example, in problem 1 the solution is unaltered by
    row-scaling.  If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A  b ) should be
    scaled up or down.

    In problems 1 and 2, the solution x is easily recovered
    following column-scaling.  Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).

    In problem 3, there is no freedom to re-scale if damp is
    nonzero.  However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.

    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large.  Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.

    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:

      1. Compute a residual vector ``r0 = b - A*x0``.
      2. Use LSQR to solve the system  ``A*dx = r0``.
      3. Add the correction dx to obtain a final solution ``x = x0 + dx``.

    This requires that ``x0`` be available before and after the call
    to LSQR.  To judge the benefits, suppose LSQR takes k1 iterations
    to solve A*x = b and k2 iterations to solve A*dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate.  The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A*x = b, the larger value
    btol*norm(b)/norm(r0)  should be suitable for A*dx = r0.

    Preconditioning is another way to reduce the number of iterations.
    If it is possible to solve a related system ``M*x = b``
    efficiently, where M approximates A in some helpful way (e.g. M -
    A has low rank or its elements are small relative to those of A),
    LSQR may converge more rapidly on the system ``A*M(inverse)*z =
    b``, after which x can be recovered by solving M*x = z.

    If A is symmetric, LSQR should not be used!

    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ.  SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR.  If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration than
    SYMMLQ (but will take the same number of iterations).

    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583.  LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995).  "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lsqr
    >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)

    The first example has the trivial solution `[0, 0]`

    >>> b = np.array([0., 0., 0.], dtype=float)
    >>> x, istop, itn, normr = lsqr(A, b)[:4]
    The exact solution is  x = 0
    >>> istop
    0
    >>> x
    array([ 0.,  0.])

    The stopping code `istop=0` returned indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
    The next example has a non-trivial solution:

    >>> b = np.array([1., 0., -1.], dtype=float)
    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
    >>> istop
    1
    >>> x
    array([ 1., -1.])
    >>> itn
    1
    >>> r1norm
    4.440892098500627e-16

    As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
    limits. The given solution `[1., -1.]` obviously solves the equation. The
    remaining return values include information about the number of iterations
    (`itn=1`) and the remaining difference of left and right side of the solved
    equation.
    The final example demonstrates the behavior in the case where there is no
    solution for the equation:

    >>> b = np.array([1., 0.01, -1.], dtype=float)
    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
    >>> istop
    2
    >>> x
    array([ 1.00333333, -0.99666667])
    >>> A.dot(x)-b
    array([ 0.00333333, -0.00333333,  0.00333333])
    >>> r1norm
    0.005773502691896255

    `istop` indicates that the system is inconsistent and thus `x` is rather an
    approximate solution to the corresponding least-squares problem. `r1norm`
    contains the norm of the minimal residual that was found.
    """
    A = aslinearoperator(A)
    b = np.atleast_1d(b)
    if b.ndim > 1:
        b = b.squeeze()

    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    var = np.zeros(n)

    msg = ('The exact solution is  x = 0                              ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    if show:
        print(' ')
        print('LSQR            Least-squares solution of  Ax = b')
        str1 = 'The matrix A has %8g rows  and %8g cols' % (m, n)
        str2 = 'damp = %20.14e   calc_var = %8g' % (damp, calc_var)
        str3 = 'atol = %8.2e                 conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e               iter_lim = %8g' % (btol, iter_lim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    itn = 0
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0
    """
    Set up the first vectors u and v for the bidiagonalization.
    These satisfy  beta*u = b - A*x,  alfa*v = A'*u.
    """
    u = b
    bnorm = np.linalg.norm(b)
    if x0 is None:
        x = np.zeros(n)
        beta = bnorm.copy()
    else:
        x = np.asarray(x0)
        u = u - A.matvec(x)
        beta = np.linalg.norm(u)

    if beta > 0:
        u = (1 / beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)
    else:
        v = x.copy()
        alfa = 0

    if alfa > 0:
        v = (1 / alfa) * v
    w = v.copy()

    rhobar = alfa
    phibar = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = '   Itn      x[0]       r1norm     r2norm '
    head2 = ' Compatible    LS      Norm A   Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)

    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        """
        %     Perform the next step of the bidiagonalization to obtain the
        %     next  beta, u, alfa, v.  These satisfy the relations
        %                beta*u  =  a*v   -  alfa*u,
        %                alfa*v  =  A'*u  -  beta*v.
        """
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)

        if beta > 0:
            u = (1 / beta) * u
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v

        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        rhobar1 = sqrt(rhobar**2 + damp**2)
        cs1 = rhobar / rhobar1
        sn1 = damp / rhobar1
        psi = sn1 * phibar
        phibar = cs1 * phibar

        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi

        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2

        if calc_var:
            var = var + dk**2

        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2

        # Test for convergence.
        # First, estimate the condition of the matrix  Abar,
        # and the norms of  rbar  and  Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)

        # Distinguish between
        #    r1norm = ||b - Ax|| and
        #    r2norm = rnorm in current code
        #           = sqrt(r1norm^2 + damp^2*||x||^2).
        #    Estimate r1norm from
        #    r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
        # Although there is cancellation, it might be accurate enough.
        r1sq = rnorm**2 - dampsq * xxnorm
        r1norm = sqrt(abs(r1sq))
        if r1sq < 0:
            r1norm = -r1norm
        r2norm = rnorm

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm

        # The following tests guard against extremely small values of
        # atol, btol  or  ctol.  (The user may have set any or all of
        # the parameters  atol, btol, conlim  to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= iter_lim - 10:
            prnt = True
        # if itn%10 == 0: prnt = True
        if test3 <= 2 * ctol:
            prnt = True
        if test2 <= 10 * atol:
            prnt = True
        if test1 <= 10 * rtol:
            prnt = True
        if istop != 0:
            prnt = True

        if prnt:
            if show:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)

        if istop != 0:
            break

    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g   r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e   arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn   =%8g   r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e   xnorm  =%8.1e' % (acond, xnorm)
        print(str1 + '   ' + str2)
        print(str3 + '   ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
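The docstring's warm-start recipe for damp == 0 (compute r0 = b - A*x0, solve A*dx = r0, add the correction) as a concrete sketch, reusing the docstring's example matrix; x0 here is an arbitrary illustrative guess:

import numpy as np
from scipy.sparse.linalg import lsqr

A = np.array([[1., 0.], [1., 1.], [0., 1.]])
b = np.array([1., 0., -1.])

x0 = np.array([0.9, -0.9])   # some prior estimate of the solution
r0 = b - A.dot(x0)           # step 1: residual of the initial estimate
dx = lsqr(A, r0)[0]          # step 2: solve A*dx = r0 in the least-squares sense
x = x0 + dx                  # step 3: corrected solution
print(x)                     # approximately [ 1. -1.]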
Example #29
 def assertCompatibleSystem(self, A, xtrue):
     Afun = aslinearoperator(A)
     b = Afun.matvec(xtrue)
     x = lsmr(A, b)[0]
     assert_almost_equal(norm(x - xtrue), 0, decimal=5)
Example #30
def cgne(A,
         b,
         x0=None,
         tol=1e-5,
         maxiter=None,
         xtype=None,
         M=None,
         callback=None,
         residuals=None):
    '''Conjugate Gradient, Normal Error algorithm

    Applies CG to the normal equations, A A.H x = b. Left preconditioning
    is supported.  Note that unless A is well-conditioned, the use of
    CGNE is inadvisable.

    Parameters
    ----------
    A : {array, matrix, sparse matrix, LinearOperator}
        n x n, linear system to solve
    b : {array, matrix}
        right hand side, shape is (n,) or (n,1)
    x0 : {array, matrix}
        initial guess, default is a vector of zeros
    tol : float
        relative convergence tolerance, i.e. tol is scaled by ||r_0||_2
    maxiter : int
        maximum number of allowed iterations
    xtype : type
        dtype for the solution, default is automatic type detection
    M : {array, matrix, sparse matrix, LinearOperator}
        n x n, inverted preconditioner, i.e. solve M A A.H x = M b.
    callback : function
        User-supplied function is called after each iteration as
        callback(xk), where xk is the current solution vector
    residuals : list
        residuals has the residual norm history,
        including the initial residual, appended to it

    Returns
    -------
    (xNew, info)
    xNew : an updated guess to the solution of Ax = b
    info : halting status of cgne

            ==  =======================================
            0   successful exit
            >0  convergence to tolerance not achieved,
                return iteration count instead.
            <0  numerical breakdown, or illegal input
            ==  =======================================

    Notes
    -----
        - The LinearOperator class is in scipy.sparse.linalg.interface.
          Use this class if you prefer to define A or M as a mat-vec routine
          as opposed to explicitly constructing the matrix.  A.psolve(..) is
          still supported as a legacy.

    Examples
    --------
    >>> from pyamg.krylov.cgne import cgne
    >>> from pyamg.util.linalg import norm
    >>> import numpy as np
    >>> from pyamg.gallery import poisson
    >>> A = poisson((10,10))
    >>> b = np.ones((A.shape[0],))
    >>> (x,flag) = cgne(A,b, maxiter=2, tol=1e-8)
    >>> print(norm(b - A*x))
    46.1547104367

    References
    ----------
    .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
       Second Edition", SIAM, pp. 276-7, 2003
       http://www-users.cs.umn.edu/~saad/books.html

    '''

    # Store the conjugate transpose explicitly as it will be used much later on
    if isspmatrix(A):
        AH = A.H
    else:
        # TODO avoid doing this since A may be a different sparse type
        AH = aslinearoperator(np.asmatrix(A).H)

    # Convert inputs to linear system, with error checking
    A, M, x, b, postprocess = make_system(A, M, x0, b, xtype)
    dimen = A.shape[0]

    # Ensure that warnings are always reissued from this function
    import warnings
    warnings.filterwarnings('always', module=r'pyamg\.krylov\._cgne')

    # Choose type
    if not hasattr(A, 'dtype'):
        Atype = upcast(x.dtype, b.dtype)
    else:
        Atype = A.dtype
    if not hasattr(M, 'dtype'):
        Mtype = upcast(x.dtype, b.dtype)
    else:
        Mtype = M.dtype
    xtype = upcast(Atype, x.dtype, b.dtype, Mtype)

    # Should norm(r) be kept
    if residuals == []:
        keep_r = True
    else:
        keep_r = False

    # How often should r be recomputed
    recompute_r = 8

    # Check iteration numbers. CGNE suffers from loss of orthogonality quite
    # easily, so we arbitrarily let the method go up to 130% over the
    # theoretically necessary limit of maxiter=dimen
    if maxiter is None:
        maxiter = int(np.ceil(1.3 * dimen)) + 2
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')
    elif maxiter > (1.3 * dimen):
        warn('maximum allowed inner iterations (maxiter) exceeds 130% of '
             'the number of dofs; capping maxiter at that limit')
        maxiter = int(np.ceil(1.3 * dimen)) + 2

    # Prep for method
    r = b - A * x
    normr = norm(r)
    if keep_r:
        residuals.append(normr)

    # Check initial guess ( scaling by b, if b != 0,
    #   must account for case when norm(b) is very small)
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol * normb:
        if callback is not None:
            callback(x)
        return (postprocess(x), 0)

    # Scale tol by ||r_0||_2
    if normr != 0.0:
        tol = tol * normr

    # Begin CGNE

    # Apply preconditioner and calculate initial search direction
    z = M * r
    p = AH * z
    old_zr = np.inner(z.conjugate(), r)

    for iter in range(maxiter):

        # alpha = (z_j, r_j) / (p_j, p_j)
        alpha = old_zr / np.inner(p.conjugate(), p)

        # x_{j+1} = x_j + alpha*p_j
        x += alpha * p

        # r_{j+1} = r_j - alpha*w_j,   where w_j = A*p_j
        if np.mod(iter, recompute_r) and iter > 0:
            r -= alpha * (A * p)
        else:
            r = b - A * x

        # z_{j+1} = M*r_{j+1}
        z = M * r

        # beta = (z_{j+1}, r_{j+1}) / (z_j, r_j)
        new_zr = np.inner(z.conjugate(), r)
        beta = new_zr / old_zr
        old_zr = new_zr

        # p_{j+1} = A.H*z_{j+1} + beta*p_j
        p *= beta
        p += AH * z

        # Allow user access to residual
        if callback is not None:
            callback(x)

        # test for convergence
        normr = norm(r)
        if keep_r:
            residuals.append(normr)
        if normr < tol:
            return (postprocess(x), 0)

    # end loop

    return (postprocess(x), iter + 1)
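
A short usage sketch for the solver above, collecting the residual history through the `residuals` argument (this assumes pyamg and its gallery are installed; import paths may differ across pyamg versions):

import numpy as np
from pyamg.gallery import poisson
from pyamg.krylov import cgne

A = poisson((10, 10), format='csr')    # 100 x 100 Poisson matrix
b = np.ones(A.shape[0])

residuals = []                         # pass [] to collect ||r_k||_2 per iteration
x, flag = cgne(A, b, tol=1e-8, maxiter=100, residuals=residuals)

print(flag)                            # 0 on successful convergence
print(residuals[-1] / residuals[0])    # achieved relative residual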
Example #33
0
def lsmr(A,
         b,
         damp=0.0,
         atol=1e-6,
         btol=1e-6,
         conlim=1e8,
         maxiter=None,
         show=False):
    """Iterative solver for least-squares problems.

    lsmr solves the system of linear equations ``Ax = b``. If the system
    is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
    A is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. B is a vector of length m.
    The matrix A may be dense or sparse (usually sparse).

    .. versionadded:: 0.11.0

    Parameters
    ----------
    A : {matrix, sparse matrix, ndarray, LinearOperator}
        Matrix A in the linear system.
    b : (m,) ndarray
        Vector b in the linear system.
    damp : float
        Damping factor for regularized least-squares. `lsmr` solves
        the regularized least-squares problem::

         min ||(b) - (  A   )x||
             ||(0)   (damp*I) ||_2

        where damp is a scalar.  If damp is None or 0, the system
        is solved without regularization.
    atol, btol : float
        Stopping tolerances. `lsmr` continues iterations until a
        certain backward error estimate is smaller than some quantity
        depending on atol and btol.  Let ``r = b - Ax`` be the
        residual vector for the current approximate solution ``x``.
        If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
        Otherwise, lsmr terminates when ``norm(A^{T} r) <=
        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6 (say),
        the final ``norm(r)`` should be accurate to about 6
        digits. (The final x will usually have fewer correct digits,
        depending on ``cond(A)`` and the size of LAMBDA.)  If `atol`
        or `btol` is None, a default value of 1.0e-6 will be used.
        Ideally, they should be estimates of the relative error in the
        entries of A and B respectively.  For example, if the entries
        of `A` have 7 correct digits, set atol = 1e-7. This prevents
        the algorithm from doing unnecessary work beyond the
        uncertainty of the input data.
    conlim : float
        `lsmr` terminates if an estimate of ``cond(A)`` exceeds
        `conlim`.  For compatible systems ``Ax = b``, conlim could be
        as large as 1.0e+12 (say).  For least-squares problems,
        `conlim` should be less than 1.0e+8. If `conlim` is None, the
        default value is 1e+8.  Maximum precision can be obtained by
        setting ``atol = btol = conlim = 0``, but the number of
        iterations may then be excessive.
    maxiter : int
        `lsmr` terminates if the number of iterations reaches
        `maxiter`.  The default is ``maxiter = min(m, n)``.  For
        ill-conditioned systems, a larger value of `maxiter` may be
        needed.
    show : bool
        Print iterations logs if ``show=True``.

    Returns
    -------
    x : ndarray of float
        Least-square solution returned.
    istop : int
        istop gives the reason for stopping::

          istop   = 0 means x=0 is a solution.
                  = 1 means x is an approximate solution to A*x = B,
                      according to atol and btol.
                  = 2 means x approximately solves the least-squares problem
                      according to atol.
                  = 3 means COND(A) seems to be greater than CONLIM.
                  = 4 is the same as 1 with atol = btol = eps (machine
                      precision)
                  = 5 is the same as 2 with atol = eps.
                  = 6 is the same as 3 with CONLIM = 1/eps.
                  = 7 means ITN reached maxiter before the other stopping
                      conditions were satisfied.

    itn : int
        Number of iterations used.
    normr : float
        ``norm(b-Ax)``
    normar : float
        ``norm(A^T (b - Ax))``
    norma : float
        ``norm(A)``
    conda : float
        Condition number of A.
    normx : float
        ``norm(x)``

    References
    ----------
    .. [1] D. C.-L. Fong and M. A. Saunders,
           "LSMR: An iterative algorithm for sparse least-squares problems",
           SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
           http://arxiv.org/abs/1006.0758
    .. [2] LSMR Software, http://www.stanford.edu/~clfong/lsmr.html

    """

    A = aslinearoperator(A)
    b = b.squeeze()

    msg = ('The exact solution is  x = 0                              ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    hdg1 = '   itn      x(1)       norm r    norm Ar'
    hdg2 = ' compatible   LS      norm A   cond A'
    pfreq = 20  # print frequency (for repeating the heading)
    pcount = 0  # print counter

    m, n = A.shape

    # stores the num of singular values
    minDim = min([m, n])

    if maxiter is None:
        maxiter = minDim

    if show:
        print(' ')
        print('LSMR            Least-squares solution of  Ax = b\n')
        print('The matrix A has %8g rows  and %8g cols' % (m, n))
        print('damp = %20.14e\n' % (damp))
        print('atol = %8.2e                 conlim = %8.2e\n' % (atol, conlim))
        print('btol = %8.2e             maxiter = %8g\n' % (btol, maxiter))

    u = b
    beta = norm(u)

    v = zeros(n)
    alpha = 0

    if beta > 0:
        u = (1 / beta) * u
        v = A.rmatvec(u)
        alpha = norm(v)

    if alpha > 0:
        v = (1 / alpha) * v

    # Initialize variables for 1st iteration.

    itn = 0
    zetabar = alpha * beta
    alphabar = alpha
    rho = 1
    rhobar = 1
    cbar = 1
    sbar = 0

    h = v.copy()
    hbar = zeros(n)
    x = zeros(n)

    # Initialize variables for estimation of ||r||.

    betadd = beta
    betad = 0
    rhodold = 1
    tautildeold = 0
    thetatilde = 0
    zeta = 0
    d = 0

    # Initialize variables for estimation of ||A|| and cond(A)

    normA2 = alpha * alpha
    maxrbar = 0
    minrbar = 1e+100
    normA = sqrt(normA2)
    condA = 1
    normx = 0

    # Items for use in stopping rules.
    normb = beta
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    normr = beta

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    normar = alpha * beta
    if normar == 0:
        if show:
            print(msg[0])
        return x, istop, itn, normr, normar, normA, condA, normx

    if show:
        print(' ')
        print(hdg1, hdg2)
        test1 = 1
        test2 = alpha / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (normr, normar)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(''.join([str1, str2, str3]))

    # Main iteration loop.
    while itn < maxiter:
        itn = itn + 1

        # Perform the next step of the bidiagonalization to obtain the
        # next  beta, u, alpha, v.  These satisfy the relations
        #         beta*u  =  A*v   -  alpha*u,
        #        alpha*v  =  A'*u  -  beta*v.

        u = A.matvec(v) - alpha * u
        beta = norm(u)

        if beta > 0:
            u = (1 / beta) * u
            v = A.rmatvec(u) - beta * v
            alpha = norm(v)
            if alpha > 0:
                v = (1 / alpha) * v

        # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.

        # Construct rotation Qhat_{k,2k+1}.

        chat, shat, alphahat = _sym_ortho(alphabar, damp)

        # Use a plane rotation (Q_i) to turn B_i to R_i

        rhoold = rho
        c, s, rho = _sym_ortho(alphahat, beta)
        thetanew = s * alpha
        alphabar = c * alpha

        # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar

        rhobarold = rhobar
        zetaold = zeta
        thetabar = sbar * rho
        rhotemp = cbar * rho
        cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
        zeta = cbar * zetabar
        zetabar = -sbar * zetabar

        # Update h, h_hat, x.

        hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar
        x = x + (zeta / (rho * rhobar)) * hbar
        h = v - (thetanew / rho) * h

        # Estimate of ||r||.

        # Apply rotation Qhat_{k,2k+1}.
        betaacute = chat * betadd
        betacheck = -shat * betadd

        # Apply rotation Q_{k,k+1}.
        betahat = c * betaacute
        betadd = -s * betaacute

        # Apply rotation Qtilde_{k-1}.
        # betad = betad_{k-1} here.

        thetatildeold = thetatilde
        ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
        thetatilde = stildeold * rhobar
        rhodold = ctildeold * rhobar
        betad = -stildeold * betad + ctildeold * betahat

        # betad   = betad_k here.
        # rhodold = rhod_k  here.

        tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
        taud = (zeta - thetatilde * tautildeold) / rhodold
        d = d + betacheck * betacheck
        normr = sqrt(d + (betad - taud)**2 + betadd * betadd)

        # Estimate ||A||.
        normA2 = normA2 + beta * beta
        normA = sqrt(normA2)
        normA2 = normA2 + alpha * alpha

        # Estimate cond(A).
        maxrbar = max(maxrbar, rhobarold)
        if itn > 1:
            minrbar = min(minrbar, rhobarold)
        condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)

        # Test for convergence.

        # Compute norms for convergence testing.
        normar = abs(zetabar)
        normx = norm(x)

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.

        test1 = normr / normb
        if (normA * normr) != 0:
            test2 = normar / (normA * normr)
        else:
            test2 = infty
        test3 = 1 / condA
        t1 = test1 / (1 + normA * normx / normb)
        rtol = btol + atol * normA * normx / normb

        # The following tests guard against extremely small values of
        # atol, btol or ctol.  (The user may have set any or all of
        # the parameters atol, btol, conlim  to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.

        if itn >= maxiter:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.

        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.

        if show:
            if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
               (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
               (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
               (istop != 0):

                if pcount >= pfreq:
                    pcount = 0
                    print(' ')
                    print(hdg1, hdg2)
                pcount = pcount + 1
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (normr, normar)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (normA, condA)
                print(''.join([str1, str2, str3, str4]))

        if istop > 0:
            break

    # Print the stopping condition.

    if show:
        print(' ')
        print('LSMR finished')
        print(msg[istop])
        print('istop =%8g    normr =%8.1e' % (istop, normr))
        print('    normA =%8.1e    normAr =%8.1e' % (normA, normar))
        print('itn   =%8g    condA =%8.1e' % (itn, condA))
        print('    normx =%8.1e' % (normx))
        print(str1, str2)
        print(str3, str4)

    return x, istop, itn, normr, normar, normA, condA, normx
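
A small sketch of calling SciPy's released version of this routine on an overdetermined, consistent system (assumes scipy.sparse.linalg.lsmr):

import numpy as np
from scipy.sparse.linalg import lsmr

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5))        # m > n: a least-squares problem
x_true = np.arange(5.0)
b = A @ x_true                          # consistent right-hand side

x, istop, itn = lsmr(A, b, atol=1e-10, btol=1e-10)[:3]
print(istop, itn)                       # istop == 1: Ax = b met per atol/btol
print(np.linalg.norm(x - x_true))       # close to zero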
Example #34
0
def mlem(y,
         A,
         no_iter,
         verbose=False,
         ret_iter_x=0,
         ret_iter_y=0,
         ret_norm_r=False,
         ret_objective=False,
         AT_ones=None,
         x0=None,
         inverse_thres=0.0):
    '''
    Maximizes the log-likelihood for a Poisson random variable.  y is the
    observed Poisson random variable, modeled by A * x.  Equivalently,
    minimizes the negative log-likelihood
        f(x) = 1^T Ax - y^T ln(Ax)
    Taken from Jingyu Cui's Thesis, "Fast and Accurate PET Image Reconstruction
    on Parallel Architectures," 2013.

    Parameters
    ----------
    y : (m,) array-like
        Observed Poisson variable
    A : (m,n) matrix, sparse matrix, or LinearOperator
        System Model
    no_iter : int scalar
        Number of update iterations to perform.
    verbose : boolean (Default = False), optional
        If the relative residual norm and objective should be printed out
        each iteration.
    ret_iter_x : int scalar, optional
        Return the inter-iteration history of x every ret_iter_x
        iterations.  If zero, the inter-iteration history of x is not
        returned.
    ret_iter_y : int scalar, optional
        Return the inter-iteration history of y_bar (the model) every
        ret_iter_y iterations.  If zero, the inter-iteration history of
        y_bar is not returned.
    ret_norm_r : boolean (Default = False), optional
        Return the norm of the relative residual from each iteration.
    ret_objective : boolean (Default = False), optional
        Return the objective function value from each iteration.
    AT_ones : (n,) array-like, optional
        The result of A^T 1 can be provided to avoid the computation.  AT_ones
        is used to normalize the error backpropagation step.
    x0 : (n,) array-like, optional
        Override the default initialization of x.  By default, every entry of
        x0 is set to the total number of counts in y divided by n.
    inverse_thres : float scalar
        Zeros out error values whose inverses fall below this threshold.  The
        elementwise ratios y / (A x) are backprojected with A^T; small model
        values can make these ratios explode, so ratios larger than
        1 / inverse_thres are zeroed out.

    Returns
    -------
    x : (n,) ndarray
        The weights resulting from the algorithm.
    x_history : (variable, n) ndarray, optional
        The x from each iteration specified by ret_iter_x, plus the final value.
    y_history : (variable, m) ndarray, optional
        The y_bar from each iteration specified by ret_iter_y, plus the final
        model value.
    norm_r_history : (no_iter + 1,) ndarray, optional
        The norm of the relative residual from each iteration.
    objective_history : (no_iter + 1,) ndarray, optional
        The objective function value from each iteration.

    '''
    A = aslinearoperator(A)
    y = np.asarray(y, dtype=A.dtype).squeeze()
    if AT_ones is None:
        AT_ones = A.rmatvec(np.ones(A.shape[0]))
    else:
        AT_ones = np.asarray(AT_ones, dtype=A.dtype).squeeze()
        if AT_ones.shape != (A.shape[1], ):
            raise ValueError('AT_ones is not shaped (%d,)' % A.shape[1])

    if x0 is None:
        # Initialize it to uniform weights where the total counts would match
        x = np.ones(A.shape[1], dtype=A.dtype) * (y.sum() / A.shape[1])
    else:
        x = np.asarray(x0, dtype=A.dtype).squeeze()

    norm_y = np.linalg.norm(y)

    # Save every history_idx iterations, and the last one
    save_model_idx = np.zeros(no_iter + 1, dtype=bool)
    if ret_iter_y > 0:
        save_model_idx[(np.arange(no_iter + 1) % ret_iter_y) == 0] = True
        save_model_idx[-1] = True

    save_x_idx = np.zeros(no_iter + 1, dtype=bool)
    if ret_iter_x > 0:
        save_x_idx[(np.arange(no_iter + 1) % ret_iter_x) == 0] = True
        save_x_idx[-1] = True

    history_size_model = np.sum(save_model_idx)
    history_size_x = np.sum(save_x_idx)

    x_history = np.zeros((history_size_x, A.shape[1]))
    model_history = np.zeros((history_size_model, A.shape[0]))

    # These vectors are really small in comparison, so save them every
    # iteration, and worry about returning them later.
    norm_r_history = np.zeros((no_iter + 1, ))
    objective_history = np.zeros((no_iter + 1, ))
    history_count_x = 0
    history_count_model = 0

    for iter_no in range(no_iter + 1):
        model = A.matvec(x)

        norm_r = np.linalg.norm(model - y) / norm_y

        objective = model[model > 0].astype(np.float128).sum() - \
                    (y[model > 0] * np.log(model[model > 0])
                    ).astype(np.float128).sum()
        if verbose:
            print('{0:02d}: rel_norm = {1},  objective = {2}'.format(
                iter_no, norm_r, objective))

        if save_x_idx[iter_no]:
            x_history[history_count_x, :] = x.copy()
            history_count_x += 1
        if save_model_idx[iter_no]:
            model_history[history_count_model, :] = model.copy()
            history_count_model += 1
        norm_r_history[iter_no] = norm_r
        objective_history[iter_no] = objective

        # We loop for no_iter + 1 so that we can calculate the model error
        # for the final iteration.
        if iter_no == no_iter:
            continue

        error = np.zeros(A.shape[0], dtype=A.dtype)
        error[model > 0] = y[model > 0] / model[model > 0]
        error[model <= 0] = 0
        if inverse_thres > 0:
            error[error > 1.0 / inverse_thres] = 0

        error_bp = A.rmatvec(error)

        update = np.zeros(A.shape[1], dtype=A.dtype)
        update[AT_ones > 0] = error_bp[AT_ones > 0] / AT_ones[AT_ones > 0]
        update[AT_ones <= 0] = 0

        update[update < 0] = 0

        x *= update

    ret = [
        x,
    ]
    if ret_iter_x > 0:
        ret.append(x_history)
    if ret_iter_y > 0:
        ret.append(model_history)
    if ret_norm_r:
        ret.append(norm_r_history)
    if ret_objective:
        ret.append(objective_history)

    if len(ret) == 1:
        ret = ret[0]

    return ret
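
The multiplicative update at the heart of the routine above is x_{k+1} = x_k * (A^T (y / (A x_k))) / (A^T 1). A self-contained numpy sketch of that update on a toy nonnegative system (all names here are illustrative):

import numpy as np

rng = np.random.default_rng(1)
A = rng.random((30, 8))                  # nonnegative system model
x_true = rng.random(8) + 0.1
y = A @ x_true                           # noiseless "measurements"

AT_ones = A.T @ np.ones(A.shape[0])      # normalization term, as in AT_ones above
x = np.full(8, y.sum() / 8)              # uniform init matching total counts
for _ in range(200):
    model = A @ x                        # forward projection
    x *= (A.T @ (y / model)) / AT_ones   # backproject the ratio, then normalize

print(np.linalg.norm(A @ x - y) / np.linalg.norm(y))   # small relative residual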
Example #35
0
def svds(A,
         k=6,
         ncv=None,
         tol=0,
         which='LM',
         v0=None,
         maxiter=None,
         return_singular_vectors=True,
         precAHA=None,
         precAAH=None,
         precAug=None,
         u0=None,
         locku0=None,
         lockv0=None,
         return_stats=False,
         maxBlockSize=0):
    """
    Compute k singular values and vectors for a sparse matrix.

    Parameters
    ----------
    A : {sparse matrix, LinearOperator}
        Array to compute the SVD on, of shape (M, N)
    k : int, optional
        Number of singular values and vectors to compute.
        Must be 1 <= k <= min(A.shape).
    ncv : int, optional
        The maximum size of the basis
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : str ['LM' | 'SM'] or number, optional
        Which `k` singular values to find:

            - 'LM' : largest singular values
            - 'SM' : smallest singular values
            - number : closest singular values to that number (referred to as
              sigma later)

    u0 : ndarray, optional
        Left starting vectors for the iterations.

        Should be approximate left singular vectors. If only u0 or v0 is
        provided, the other is computed.
    v0 : ndarray, optional
        Right starting vectors for the iterations.
    maxiter : int, optional
        Maximum number of iterations.
    precAHA : {N x N matrix, array, sparse matrix, LinearOperator}, optional
        Approximate inverse of (A.H*A - sigma**2*I). If provided and M>N, it
        usually accelerates the convergence.
    precAAH : {M x M matrix, array, sparse matrix, LinearOperator}, optional
        Approximate inverse of (A*A.H - sigma**2*I). If provided and M<N, it
        usually accelerates the convergence.
    precAug : {(M+N) x (M+N) matrix, array, sparse matrix, LinearOperator}, optional
        Approximate inverse of ([zeros() A.H; A zeros()] - sigma*I). It usually
        accelerates the convergence if tol < dtype.eps**.5.
    locku0 : ndarray, optional
        Left orthogonal vector constraint.

        Seek singular triplets orthogonal to locku0 and lockv0. The provided
        vectors *should* be orthonormal. If only locku0 or lockv0 is provided,
        the other is computed. Useful to avoid converging to already computed
        solutions.
    lockv0 : ndarray, optional
        Right orthogonal vector constraint. See locku0.
    maxBlockSize : int, optional
        Maximum number of vectors added at every iteration.
    return_stats : bool, optional
        If True, extra information from PRIMME is also returned.

    Returns
    -------
    u : ndarray, shape=(M, k), optional
        Unitary matrix having left singular vectors as columns.
        Returned if `return_singular_vectors` is True.
    s : ndarray, shape=(k,)
        The singular values.
    vt : ndarray, shape=(k, N), optional
        Unitary matrix having right singular vectors as rows.
        Returned if `return_singular_vectors` is True.
    stats : dict, optional (if return_stats)
        Extra information reported by PRIMME:

        - "numOuterIterations": number of outer iterations
        - "numRestarts": number of restarts
        - "numMatvecs": number of A*v
        - "numPreconds": number of OPinv*v
        - "elapsedTime": time that took 

        Returned if `return_stats` is True.

    See Also
    --------
    Primme.eigsh : eigenvalue decomposition for a sparse symmetric/complex Hermitian matrix A
    scipy.sparse.linalg.eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A

    Examples
    --------
    >>> import Primme, scipy.sparse
    >>> A = scipy.sparse.spdiags(range(10), [0], 100, 10) # sparse diag. rect. matrix
    >>> svecs_left, svals, svecs_right = Primme.svds(A, 3, tol=1e-6, which='SM')
    >>> svals # the three smallest singular values of A
    array([ 1.,  2.,  3.])

    >>> import Primme, scipy.sparse
    >>> import numpy as np
    >>> A = scipy.sparse.rand(10000, 100, random_state=10)
    >>> prec = scipy.sparse.spdiags(np.reciprocal(A.multiply(A).sum(axis=0)),
    ...           [0], 100, 100) # square diag. preconditioner
    >>> svecs_left, svals, svecs_right = Primme.svds(A, 3, which=6.0, tol=1e-6, precAHA=prec)
    >>> ["%.5f" % x for x in svals.flat] # the three closest singular values of A to 0.5
    ['5.99871', '5.99057', '6.01065']
    """

    A = aslinearoperator(A)

    m, n = A.shape

    if k <= 0 or k > min(n, m):
        raise ValueError("k=%d must be between 1 and min(A.shape)=%d" %
                         (k, min(n, m)))

    if precAHA is not None:
        precAHA = aslinearoperator(precAHA)
        if precAHA.shape[0] != precAHA.shape[1] or precAHA.shape[0] != n:
            raise ValueError('precAHA: expected square matrix with size %d' %
                             n)

    if precAAH is not None:
        precAAH = aslinearoperator(precAAH)
        if precAAH.shape[0] != precAAH.shape[1] or precAAH.shape[0] != m:
            raise ValueError('precAAH: expected square matrix with size %d' %
                             m)

    if precAug is not None:
        precAug = aslinearoperator(precAug)
        if precAug.shape[0] != precAug.shape[1] or precAug.shape[0] != m + n:
            raise ValueError('precAug: expected square matrix with size %d' %
                             (m + n))

    class PSP(PrimmeSvdsParams):
        def __init__(self):
            PrimmeSvdsParams.__init__(self)

        def matvec(self, X, transpose):
            if transpose == 0:
                return A.matmat(X)
            else:
                return A.H.matmat(X)

        def prevec(self, X, mode):
            if mode == primme_svds_op_AtA and precAHA is not None:
                return precAHA.matmat(X)
            elif mode == primme_svds_op_AAt and precAAH is not None:
                return precAAH.matmat(X)
            elif mode == primme_svds_op_augmented and precAug is not None:
                return precAug.matmat(X)
            return X

    pp = PSP()

    pp.m = A.shape[0]
    pp.n = A.shape[1]

    pp.numSvals = k

    if which == 'LM':
        pp.target = primme_svds_largest
    elif which == 'SM':
        pp.target = primme_svds_smallest
    else:
        try:
            which = float(which)
        except (TypeError, ValueError):
            raise ValueError("which must be either 'LM', 'SM' or a number.")
        pp.target = primme_svds_closest_abs
        pp.targetShifts = np.array([which], dtype='d')

    pp.eps = tol

    if ncv:
        pp.maxBasisSize = ncv

    if maxiter:
        # NOTE: every eigensolver iteration spends two matvecs*blockSize
        pp.maxMatvecs = maxiter * (maxBlockSize if maxBlockSize else 1) // 2

    if maxBlockSize:
        pp.maxBlockSize = maxBlockSize

    if precAHA is not None or precAAH is not None or precAug is not None:
        pp.precondition = 1

    def check_pair(u, v, var_names):
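        # If the shapes do not match (m, n), assume u and v were passed
        # swapped and exchange them once.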
        if ((u is not None and u.shape[0] != m)
                or (v is not None and v.shape[0] != n)):
            aux = v
            v = u
            u = aux

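        # If the shapes still do not match after the swap, restore the
        # original order and report the mismatch.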
        if ((u is not None and u.shape[0] != m)
                or (v is not None and v.shape[0] != n)):
            aux = v
            v = u
            u = aux
            raise ValueError("%s don't have the expected number of rows." %
                             var_names)

        if u is not None and v is not None and u.shape[1] != v.shape[1]:
            raise ValueError("%s don't have the same number of columns." %
                             var_names)

        if u is not None and v is None:
            v, _ = np.linalg.qr(A.H.matmat(u))

        if v is not None and u is None:
            u, _ = np.linalg.qr(A.matmat(v))

        return u, v

    locku0, lockv0 = check_pair(locku0, lockv0, "lockv0 or locku0")

    if locku0 is not None:
        pp.numOrthoConst = min(locku0.shape[1], min(m, n))

    if A.dtype.kind in frozenset(["b", "i", "u", "f"]):
        dtype = np.dtype("d")
    else:
        dtype = np.dtype("complex128")

    svals = np.zeros(pp.numSvals)
    svecsl = np.zeros((pp.m, pp.numOrthoConst + pp.numSvals), dtype, order='F')
    svecsr = np.zeros((pp.n, pp.numOrthoConst + pp.numSvals), dtype, order='F')
    norms = np.zeros(pp.numSvals)

    if locku0 is not None:
        np.copyto(svecsl[:, 0:pp.numOrthoConst], locku0[:, 0:pp.numOrthoConst])
        np.copyto(svecsr[:, 0:pp.numOrthoConst], lockv0[:, 0:pp.numOrthoConst])

    u0, v0 = check_pair(u0, v0, "v0 or u0")

    if v0 is not None:
        pp.initSize = min(v0.shape[1], pp.numSvals)
        np.copyto(svecsl[:, pp.numOrthoConst:pp.numOrthoConst + pp.initSize],
                  u0[:, 0:pp.initSize])
        np.copyto(svecsr[:, pp.numOrthoConst:pp.numOrthoConst + pp.initSize],
                  v0[:, 0:pp.initSize])

    if dtype is np.dtype('d'):
        err = dprimme_svds(svals, svecsl, svecsr, norms, pp)
    else:
        err = zprimme_svds(svals, svecsl, svecsr, norms, pp)

    if err != 0:
        raise PrimmeSvdsError(err)

    if return_stats:
        stats = dict((f, getattr(pp.stats, f)) for f in [
            "numOuterIterations", "numRestarts", "numMatvecs", "numPreconds",
            "elapsedTime"
        ])

    if not return_singular_vectors:
        return svals if not return_stats else (svals, stats)

    svecsl = svecsl[:, pp.numOrthoConst:]
    svecsr = svecsr[:, pp.numOrthoConst:]

    # Transpose conjugate svecsr
    svecsr = svecsr.T.conj()

    if not return_stats:
        return svecsl, svals, svecsr
    else:
        return svecsl, svals, svecsr, stats
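
Because the routine above begins with `aslinearoperator(A)`, a matrix-free operator can be passed in place of an explicit matrix. A sketch using SciPy's LinearOperator (the operator and its singular values are made up for illustration):

import numpy as np
from scipy.sparse.linalg import LinearOperator

d = np.arange(1.0, 11.0)                 # intended singular values 1..10
A_op = LinearOperator(
    shape=(100, 10),
    matvec=lambda x: np.concatenate([d * np.ravel(x), np.zeros(90)]),
    rmatvec=lambda y: d * np.ravel(y)[:10],
    dtype=np.float64,
)
# Primme.svds(A_op, 3, tol=1e-6, which='SM') would then return the three
# smallest singular values of this operator, array([1., 2., 3.]).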
Example #36
0
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
         iter_lim=None, show=False, calc_var=False):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.

    The function solves ``Ax = b``  or  ``min ||b - Ax||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x||^2``.

    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    ::

      1. Unsymmetric equations --    solve  A*x = b

      2. Linear least squares  --    solve  A*x = b
                                     in the least-squares sense

      3. Damped least squares  --    solve  (   A    )*x = ( b )
                                            ( damp*I )     ( 0 )
                                     in the least-squares sense

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Representation of an m-by-n matrix.  It is required that
        the linear operator can produce ``Ax`` and ``A^T x``.
    b : (m,) ndarray
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient.
    atol, btol : float, optional
        Stopping tolerances. If both are 1.0e-9 (say), the final
        residual norm should be accurate to about 9 digits.  (The
        final x will usually have fewer correct digits, depending on
        cond(A) and the size of damp.)
    conlim : float, optional
        Another stopping tolerance.  lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`.  For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say).  For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive.
    iter_lim : int, optional
        Explicit limitation on number of iterations (for safety).
    show : bool, optional
        Display an iteration log.
    calc_var : bool, optional
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.

    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2  +  damp^2 * norm(x)^2 )``.  Equal to `r1norm` if
        ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'*r - damp^2*x)``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
        damp^2*I)^{-1}``.  This is well defined if A has full column
        rank or ``damp > 0``.  (Not sure what var means if ``rank(A)
        < n`` and ``damp = 0.``)

    Notes
    -----
    LSQR uses an iterative method to approximate the solution.  The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem.  Poor scaling of the rows
    or columns of A should therefore be avoided where possible.

    For example, in problem 1 the solution is unaltered by
    row-scaling.  If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A  b ) should be
    scaled up or down.

    In problems 1 and 2, the solution x is easily recovered
    following column-scaling.  Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).

    In problem 3, there is no freedom to re-scale if damp is
    nonzero.  However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.

    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large.  Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.

    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:

      1. Compute a residual vector ``r0 = b - A*x0``.
      2. Use LSQR to solve the system  ``A*dx = r0``.
      3. Add the correction dx to obtain a final solution ``x = x0 + dx``.

    This requires that ``x0`` be available before and after the call
    to LSQR.  To judge the benefits, suppose LSQR takes k1 iterations
    to solve A*x = b and k2 iterations to solve A*dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate.  The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A*x = b, the larger value
    btol*norm(b)/norm(r0)  should be suitable for A*dx = r0.

    Preconditioning is another way to reduce the number of iterations.
    If it is possible to solve a related system ``M*x = b``
    efficiently, where M approximates A in some helpful way (e.g. M -
    A has low rank or its elements are small relative to those of A),
    LSQR may converge more rapidly on the system ``A*M(inverse)*z =
    b``, after which x can be recovered by solving M*x = z.

    If A is symmetric, LSQR should not be used!

    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ.  SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR.  If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration than
    SYMMLQ (but will take the same number of iterations).

    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583.  LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995).  "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.

    """
    A = aslinearoperator(A)
    if len(b.shape) > 1:
        b = b.squeeze()

    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    var = np.zeros(n)

    msg = ('The exact solution is  x = 0                              ',
         'Ax - b is small enough, given atol, btol                  ',
         'The least-squares solution is good enough, given atol     ',
         'The estimate of cond(Abar) has exceeded conlim            ',
         'Ax - b is small enough for this machine                   ',
         'The least-squares solution is good enough for this machine',
         'Cond(Abar) seems to be too large for this machine         ',
         'The iteration limit has been reached                      ')

    if show:
        print(' ')
        print('LSQR            Least-squares solution of  Ax = b')
        str1 = 'The matrix A has %8g rows  and %8g cols' % (m, n)
        str2 = 'damp = %20.14e   calc_var = %8g' % (damp, calc_var)
        str3 = 'atol = %8.2e                 conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e               iter_lim = %8g' % (btol, iter_lim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    itn = 0
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1/conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0

    """
    Set up the first vectors u and v for the bidiagonalization.
    These satisfy  beta*u = b,  alfa*v = A'u.
    """
    v = np.zeros(n)
    u = b
    x = np.zeros(n)
    alfa = 0
    beta = np.linalg.norm(u)
    w = np.zeros(n)

    if beta > 0:
        u = (1/beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)

    if alfa > 0:
        v = (1/alfa) * v
        w = v.copy()

    rhobar = alfa
    phibar = beta
    bnorm = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        if show:
            print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = '   Itn      x[0]       r1norm     r2norm '
    head2 = ' Compatible    LS      Norm A   Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)

    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        """
        %     Perform the next step of the bidiagonalization to obtain the
        %     next  beta, u, alfa, v.  These satisfy the relations
        %                beta*u  =  A*v   -  alfa*u,
        %                alfa*v  =  A'*u  -  beta*v.
        """
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)

        if beta > 0:
            u = (1/beta) * u
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v

        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        rhobar1 = sqrt(rhobar**2 + damp**2)
        cs1 = rhobar / rhobar1
        sn1 = damp / rhobar1
        psi = sn1 * phibar
        phibar = cs1 * phibar

        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi

        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2

        if calc_var:
            var = var + dk**2

        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2

        # Test for convergence.
        # First, estimate the condition of the matrix  Abar,
        # and the norms of  rbar  and  Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)

        # Distinguish between
        #    r1norm = ||b - Ax|| and
        #    r2norm = rnorm in current code
        #           = sqrt(r1norm^2 + damp^2*||x||^2).
        #    Estimate r1norm from
        #    r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
        # Although there is cancellation, it might be accurate enough.
        r1sq = rnorm**2 - dampsq * xxnorm
        r1norm = sqrt(abs(r1sq))
        if r1sq < 0:
            r1norm = -r1norm
        r2norm = rnorm

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm

        # The following tests guard against extremely small values of
        # atol, btol  or  ctol.  (The user may have set any or all of
        # the parameters  atol, btol, conlim  to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= iter_lim-10:
            prnt = True
        # if itn%10 == 0: prnt = True
        if test3 <= 2*ctol:
            prnt = True
        if test2 <= 10*atol:
            prnt = True
        if test1 <= 10*rtol:
            prnt = True
        if istop != 0:
            prnt = True

        if prnt:
            if show:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)

        if istop != 0:
            break

    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g   r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e   arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn   =%8g   r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e   xnorm  =%8.1e' % (acond, xnorm)
        print(str1 + '   ' + str2)
        print(str3 + '   ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
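
A quick sketch of the damped problem (3) above using SciPy's released lsqr; damping penalizes ||x||, so the damped solution should have a smaller norm than the plain least-squares one:

import numpy as np
from scipy.sparse.linalg import lsqr

A = np.array([[1., 0.], [1., 1.], [0., 1.]])
b = np.array([1., 0.01, -1.])

x_plain = lsqr(A, b)[0]                  # ordinary least-squares solution
x_damped = lsqr(A, b, damp=1.0)[0]       # min ||Ax - b||^2 + damp^2 ||x||^2
print(np.linalg.norm(x_damped) < np.linalg.norm(x_plain))   # True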
Example #37
0
def lsmr(A,
         b,
         damp=0.0,
         atol=1e-6,
         btol=1e-6,
         conlim=1e8,
         maxiter=None,
         show=False,
         x0=None):
    """Iterative solver for least-squares problems.

    lsmr solves the system of linear equations ``Ax = b``. If the system
    is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
    ``A`` is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. ``b`` is a vector of length m.
    The matrix A may be dense or sparse (usually sparse).

    Parameters
    ----------
    A : {matrix, sparse matrix, ndarray, LinearOperator}
        Matrix A in the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^H x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : array_like, shape (m,)
        Vector ``b`` in the linear system.
    damp : float
        Damping factor for regularized least-squares. `lsmr` solves
        the regularized least-squares problem::

         min ||(b) - (  A   )x||
             ||(0)   (damp*I) ||_2

        where damp is a scalar.  If damp is None or 0, the system
        is solved without regularization.
    atol, btol : float, optional
        Stopping tolerances. `lsmr` continues iterations until a
        certain backward error estimate is smaller than some quantity
        depending on atol and btol.  Let ``r = b - Ax`` be the
        residual vector for the current approximate solution ``x``.
        If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
        Otherwise, lsmr terminates when ``norm(A^H r) <=
        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6 (say),
        the final ``norm(r)`` should be accurate to about 6
        digits. (The final ``x`` will usually have fewer correct digits,
        depending on ``cond(A)`` and the size of LAMBDA.)  If `atol`
        or `btol` is None, a default value of 1.0e-6 will be used.
        Ideally, they should be estimates of the relative error in the
        entries of ``A`` and ``b`` respectively.  For example, if the entries
        of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
        the algorithm from doing unnecessary work beyond the
        uncertainty of the input data.
    conlim : float, optional
        `lsmr` terminates if an estimate of ``cond(A)`` exceeds
        `conlim`.  For compatible systems ``Ax = b``, conlim could be
        as large as 1.0e+12 (say).  For least-squares problems,
        `conlim` should be less than 1.0e+8. If `conlim` is None, the
        default value is 1e+8.  Maximum precision can be obtained by
        setting ``atol = btol = conlim = 0``, but the number of
        iterations may then be excessive.
    maxiter : int, optional
        `lsmr` terminates if the number of iterations reaches
        `maxiter`.  The default is ``maxiter = min(m, n)``.  For
        ill-conditioned systems, a larger value of `maxiter` may be
        needed.
    show : bool, optional
        Print iterations logs if ``show=True``.
    x0 : array_like, shape (n,), optional
        Initial guess of ``x``; if None, zeros are used.

        .. versionadded:: 1.0.0
        
    Returns
    -------
    x : ndarray of float
        Least-square solution returned.
    istop : int
        istop gives the reason for stopping::

          istop   = 0 means x=0 is a solution.  If x0 was given, then x=x0 is a
                      solution.
                  = 1 means x is an approximate solution to A*x = B,
                      according to atol and btol.
                  = 2 means x approximately solves the least-squares problem
                      according to atol.
                  = 3 means COND(A) seems to be greater than CONLIM.
                  = 4 is the same as 1 with atol = btol = eps (machine
                      precision)
                  = 5 is the same as 2 with atol = eps.
                  = 6 is the same as 3 with CONLIM = 1/eps.
                  = 7 means ITN reached maxiter before the other stopping
                      conditions were satisfied.

    itn : int
        Number of iterations used.
    normr : float
        ``norm(b-Ax)``
    normar : float
        ``norm(A^H (b - Ax))``
    norma : float
        ``norm(A)``
    conda : float
        Condition number of A.
    normx : float
        ``norm(x)``

    Notes
    -----

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] D. C.-L. Fong and M. A. Saunders,
           "LSMR: An iterative algorithm for sparse least-squares problems",
           SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
           :arxiv:`1006.0758`
    .. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lsmr
    >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)

    The first example has the trivial solution `[0, 0]`

    >>> b = np.array([0., 0., 0.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    0
    >>> x
    array([ 0.,  0.])

    The returned stopping code `istop=0` indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
    The next example has a non-trivial solution:

    >>> b = np.array([1., 0., -1.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    1
    >>> x
    array([ 1., -1.])
    >>> itn
    1
    >>> normr
    4.440892098500627e-16

    As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
    limits. The given solution `[1., -1.]` obviously solves the equation. The
    remaining return values include information about the number of iterations
    (`itn=1`) and the norm of the final residual of the solved equation
    (`normr`).
    The final example demonstrates the behavior in the case where there is no
    solution for the equation:

    >>> b = np.array([1., 0.01, -1.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    2
    >>> x
    array([ 1.00333333, -0.99666667])
    >>> A.dot(x)-b
    array([ 0.00333333, -0.00333333,  0.00333333])
    >>> normr
    0.005773502691896255

    `istop` indicates that the system is inconsistent and thus `x` is rather an
    approximate solution to the corresponding least-squares problem. `normr`
    contains the minimal distance that was found.
    """

    A = aslinearoperator(A)
    b = atleast_1d(b)
    if b.ndim > 1:
        b = b.squeeze()

    msg = ('The exact solution is x = 0, or x = x0, if x0 was given  ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    hdg1 = '   itn      x(1)       norm r    norm Ar'
    hdg2 = ' compatible   LS      norm A   cond A'
    pfreq = 20  # print frequency (for repeating the heading)
    pcount = 0  # print counter

    m, n = A.shape

    # stores the num of singular values
    minDim = min([m, n])

    if maxiter is None:
        maxiter = minDim

    if x0 is None:
        dtype = result_type(A, b, float)
    else:
        dtype = result_type(A, b, x0, float)

    if show:
        print(' ')
        print('LSMR            Least-squares solution of  Ax = b\n')
        print(f'The matrix A has {m} rows and {n} columns')
        print('damp = %20.14e\n' % (damp))
        print('atol = %8.2e                 conlim = %8.2e\n' % (atol, conlim))
        print('btol = %8.2e             maxiter = %8g\n' % (btol, maxiter))

    u = b
    normb = norm(b)
    if x0 is None:
        x = zeros(n, dtype)
        beta = normb.copy()
    else:
        x = atleast_1d(x0)
        u = u - A.matvec(x)
        beta = norm(u)

    if beta > 0:
        u = (1 / beta) * u
        v = A.rmatvec(u)
        alpha = norm(v)
    else:
        v = zeros(n, dtype)
        alpha = 0

    if alpha > 0:
        v = (1 / alpha) * v

    # Initialize variables for 1st iteration.

    itn = 0
    zetabar = alpha * beta
    alphabar = alpha
    rho = 1
    rhobar = 1
    cbar = 1
    sbar = 0

    h = v.copy()
    hbar = zeros(n, dtype)

    # Initialize variables for estimation of ||r||.

    betadd = beta
    betad = 0
    rhodold = 1
    tautildeold = 0
    thetatilde = 0
    zeta = 0
    d = 0

    # Initialize variables for estimation of ||A|| and cond(A)

    normA2 = alpha * alpha
    maxrbar = 0
    minrbar = 1e+100
    normA = sqrt(normA2)
    condA = 1
    normx = 0

    # Items for use in stopping rules, normb set earlier
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    normr = beta

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    normar = alpha * beta
    if normar == 0:
        if show:
            print(msg[0])
        return x, istop, itn, normr, normar, normA, condA, normx

    if show:
        print(' ')
        print(hdg1, hdg2)
        test1 = 1
        test2 = alpha / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (normr, normar)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(''.join([str1, str2, str3]))

    # Main iteration loop.
    while itn < maxiter:
        itn = itn + 1

        # Perform the next step of the bidiagonalization to obtain the
        # next  beta, u, alpha, v.  These satisfy the relations
        #         beta*u  =  A*v   -  alpha*u,
        #        alpha*v  =  A'*u  -  beta*v.

        u *= -alpha
        u += A.matvec(v)
        beta = norm(u)

        if beta > 0:
            u *= (1 / beta)
            v *= -beta
            v += A.rmatvec(u)
            alpha = norm(v)
            if alpha > 0:
                v *= (1 / alpha)

        # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.

        # Construct rotation Qhat_{k,2k+1}.

        chat, shat, alphahat = _sym_ortho(alphabar, damp)

        # Use a plane rotation (Q_i) to turn B_i to R_i

        rhoold = rho
        c, s, rho = _sym_ortho(alphahat, beta)
        thetanew = s * alpha
        alphabar = c * alpha

        # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar

        rhobarold = rhobar
        zetaold = zeta
        thetabar = sbar * rho
        rhotemp = cbar * rho
        cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
        zeta = cbar * zetabar
        zetabar = -sbar * zetabar

        # Update h, h_hat, x.

        hbar *= -(thetabar * rho / (rhoold * rhobarold))
        hbar += h
        x += (zeta / (rho * rhobar)) * hbar
        h *= -(thetanew / rho)
        h += v

        # Estimate of ||r||.

        # Apply rotation Qhat_{k,2k+1}.
        betaacute = chat * betadd
        betacheck = -shat * betadd

        # Apply rotation Q_{k,k+1}.
        betahat = c * betaacute
        betadd = -s * betaacute

        # Apply rotation Qtilde_{k-1}.
        # betad = betad_{k-1} here.

        thetatildeold = thetatilde
        ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
        thetatilde = stildeold * rhobar
        rhodold = ctildeold * rhobar
        betad = -stildeold * betad + ctildeold * betahat

        # betad   = betad_k here.
        # rhodold = rhod_k  here.

        tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
        taud = (zeta - thetatilde * tautildeold) / rhodold
        d = d + betacheck * betacheck
        normr = sqrt(d + (betad - taud)**2 + betadd * betadd)

        # Estimate ||A||.
        normA2 = normA2 + beta * beta
        normA = sqrt(normA2)
        normA2 = normA2 + alpha * alpha

        # Estimate cond(A).
        maxrbar = max(maxrbar, rhobarold)
        if itn > 1:
            minrbar = min(minrbar, rhobarold)
        condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)

        # Test for convergence.

        # Compute norms for convergence testing.
        normar = abs(zetabar)
        normx = norm(x)

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.

        test1 = normr / normb
        if (normA * normr) != 0:
            test2 = normar / (normA * normr)
        else:
            test2 = infty
        test3 = 1 / condA
        t1 = test1 / (1 + normA * normx / normb)
        rtol = btol + atol * normA * normx / normb

        # The following tests guard against extremely small values of
        # atol, btol or ctol.  (The user may have set any or all of
        # the parameters atol, btol, conlim  to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.

        if itn >= maxiter:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.

        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.

        if show:
            if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
               (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
               (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
               (istop != 0):

                if pcount >= pfreq:
                    pcount = 0
                    print(' ')
                    print(hdg1, hdg2)
                pcount = pcount + 1
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (normr, normar)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (normA, condA)
                print(''.join([str1, str2, str3, str4]))

        if istop > 0:
            break

    # Print the stopping condition.

    if show:
        print(' ')
        print('LSMR finished')
        print(msg[istop])
        print('istop =%8g    normr =%8.1e' % (istop, normr))
        print('    normA =%8.1e    normAr =%8.1e' % (normA, normar))
        print('itn   =%8g    condA =%8.1e' % (itn, condA))
        print('    normx =%8.1e' % (normx))
        print(str1, str2)
        print(str3, str4)

    return x, istop, itn, normr, normar, normA, condA, normx
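Both LSMR listings in this collection call a helper `_sym_ortho` that is not
shown in the excerpts. A minimal sketch of a stable symmetric Givens rotation
consistent with how it is used above (returns c, s, r such that the rotation
zeroes the second component):

from numpy import sign, sqrt

def _sym_ortho(a, b):
    # Stable computation of the Givens rotation [c s; s -c] @ [a; b] = [r; 0].
    if b == 0:
        return sign(a), 0, abs(a)
    elif a == 0:
        return 0, sign(b), abs(b)
    elif abs(b) > abs(a):
        tau = a / b
        s = sign(b) / sqrt(1 + tau * tau)
        c = s * tau
        r = b / s
    else:
        tau = b / a
        c = sign(a) / sqrt(1 + tau * tau)
        s = c * tau
        r = a / c
    return c, s, r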
Example #38
def _aslinearoperator_with_dtype(m):
    m = aslinearoperator(m)
    if not hasattr(m, 'dtype'):
        x = np.zeros(m.shape[1])
        m.dtype = (m*x).dtype
    return m
Example #39
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
         maxiter=None, show=False):
    """Iterative solver for least-squares problems.

    lsmr solves the system of linear equations ``Ax = b``. If the system
    is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
    A is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. b is a vector of length m.
    The matrix A may be dense or sparse (usually sparse).

    .. versionadded:: 0.11.0

    Parameters
    ----------
    A : {matrix, sparse matrix, ndarray, LinearOperator}
        Matrix A in the linear system.
    b : (m,) ndarray
        Vector b in the linear system.
    damp : float
        Damping factor for regularized least-squares. `lsmr` solves
        the regularized least-squares problem::

         min ||(b) - (  A   )x||
             ||(0)   (damp*I) ||_2

        where damp is a scalar.  If damp is None or 0, the system
        is solved without regularization.
    atol, btol : float
        Stopping tolerances. `lsmr` continues iterations until a
        certain backward error estimate is smaller than some quantity
        depending on atol and btol.  Let ``r = b - Ax`` be the
        residual vector for the current approximate solution ``x``.
        If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
        Otherwise, lsmr terminates when ``norm(A^{T} r) <=
        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6 (say),
        the final ``norm(r)`` should be accurate to about 6
        digits. (The final x will usually have fewer correct digits,
        depending on ``cond(A)`` and the size of `damp`.)  If `atol`
        or `btol` is None, a default value of 1.0e-6 will be used.
        Ideally, they should be estimates of the relative error in the
        entries of `A` and `b` respectively.  For example, if the entries
        of `A` have 7 correct digits, set atol = 1e-7. This prevents
        the algorithm from doing unnecessary work beyond the
        uncertainty of the input data.
    conlim : float
        `lsmr` terminates if an estimate of ``cond(A)`` exceeds
        `conlim`.  For compatible systems ``Ax = b``, conlim could be
        as large as 1.0e+12 (say).  For least-squares problems,
        `conlim` should be less than 1.0e+8. If `conlim` is None, the
        default value is 1e+8.  Maximum precision can be obtained by
        setting ``atol = btol = conlim = 0``, but the number of
        iterations may then be excessive.
    maxiter : int
        `lsmr` terminates if the number of iterations reaches
        `maxiter`.  The default is ``maxiter = min(m, n)``.  For
        ill-conditioned systems, a larger value of `maxiter` may be
        needed.
    show : bool
        Print iteration logs if ``show=True``.

    Returns
    -------
    x : ndarray of float
        Least-squares solution returned.
    istop : int
        istop gives the reason for stopping::

          istop   = 0 means x=0 is a solution.
                  = 1 means x is an approximate solution to A*x = B,
                      according to atol and btol.
                  = 2 means x approximately solves the least-squares problem
                      according to atol.
                  = 3 means COND(A) seems to be greater than CONLIM.
                  = 4 is the same as 1 with atol = btol = eps (machine
                      precision)
                  = 5 is the same as 2 with atol = eps.
                  = 6 is the same as 3 with CONLIM = 1/eps.
                  = 7 means ITN reached maxiter before the other stopping
                      conditions were satisfied.

    itn : int
        Number of iterations used.
    normr : float
        ``norm(b-Ax)``
    normar : float
        ``norm(A^T (b - Ax))``
    norma : float
        ``norm(A)``
    conda : float
        Condition number of A.
    normx : float
        ``norm(x)``

    References
    ----------
    .. [1] D. C.-L. Fong and M. A. Saunders,
           "LSMR: An iterative algorithm for sparse least-squares problems",
           SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
           http://arxiv.org/abs/1006.0758
    .. [2] LSMR Software, http://www.stanford.edu/~clfong/lsmr.html

    """

    A = aslinearoperator(A)
    b = b.squeeze()

    msg = ('The exact solution is  x = 0                              ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    hdg1 = "   itn      x(1)       norm r    norm A'r"
    hdg2 = ' compatible   LS      norm A   cond A'
    pfreq = 20   # print frequency (for repeating the heading)
    pcount = 0   # print counter

    m, n = A.shape

    # stores the num of singular values
    minDim = min([m, n])

    if maxiter is None:
        maxiter = minDim

    if show:
        print(' ')
        print('LSMR            Least-squares solution of  Ax = b\n')
        print('The matrix A has %8g rows  and %8g cols' % (m, n))
        print('damp = %20.14e\n' % (damp))
        print('atol = %8.2e                 conlim = %8.2e\n' % (atol, conlim))
        print('btol = %8.2e             maxiter = %8g\n' % (btol, maxiter))

    u = b
    beta = norm(u)

    v = zeros(n)
    alpha = 0

    if beta > 0:
        u = (1 / beta) * u
        v = A.rmatvec(u)
        alpha = norm(v)

    if alpha > 0:
        v = (1 / alpha) * v

    # Initialize variables for 1st iteration.

    itn = 0
    zetabar = alpha * beta
    alphabar = alpha
    rho = 1
    rhobar = 1
    cbar = 1
    sbar = 0

    h = v.copy()
    hbar = zeros(n)
    x = zeros(n)

    # Initialize variables for estimation of ||r||.

    betadd = beta
    betad = 0
    rhodold = 1
    tautildeold = 0
    thetatilde = 0
    zeta = 0
    d = 0

    # Initialize variables for estimation of ||A|| and cond(A)

    normA2 = alpha * alpha
    maxrbar = 0
    minrbar = 1e+100
    normA = sqrt(normA2)
    condA = 1
    normx = 0

    # Items for use in stopping rules.
    normb = beta
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    normr = beta

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    normar = alpha * beta
    if normar == 0:
        if show:
            print(msg[0])
        return x, istop, itn, normr, normar, normA, condA, normx

    if show:
        print(' ')
        print(hdg1, hdg2)
        test1 = 1
        test2 = alpha / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (normr, normar)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(''.join([str1, str2, str3]))

    # Main iteration loop.
    while itn < maxiter:
        itn = itn + 1

        # Perform the next step of the bidiagonalization to obtain the
        # next  beta, u, alpha, v.  These satisfy the relations
        #         beta*u  =  A*v   -  alpha*u,
        #        alpha*v  =  A'*u  -  beta*v.

        u = A.matvec(v) - alpha * u
        beta = norm(u)

        if beta > 0:
            u = (1 / beta) * u
            v = A.rmatvec(u) - beta * v
            alpha = norm(v)
            if alpha > 0:
                v = (1 / alpha) * v

        # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.

        # Construct rotation Qhat_{k,2k+1}.

        chat, shat, alphahat = _sym_ortho(alphabar, damp)

        # Use a plane rotation (Q_i) to turn B_i to R_i

        rhoold = rho
        c, s, rho = _sym_ortho(alphahat, beta)
        thetanew = s*alpha
        alphabar = c*alpha

        # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar

        rhobarold = rhobar
        zetaold = zeta
        thetabar = sbar * rho
        rhotemp = cbar * rho
        cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
        zeta = cbar * zetabar
        zetabar = - sbar * zetabar

        # Update h, h_hat, x.

        hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar
        x = x + (zeta / (rho * rhobar)) * hbar
        h = v - (thetanew / rho) * h

        # Estimate of ||r||.

        # Apply rotation Qhat_{k,2k+1}.
        betaacute = chat * betadd
        betacheck = -shat * betadd

        # Apply rotation Q_{k,k+1}.
        betahat = c * betaacute
        betadd = -s * betaacute

        # Apply rotation Qtilde_{k-1}.
        # betad = betad_{k-1} here.

        thetatildeold = thetatilde
        ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
        thetatilde = stildeold * rhobar
        rhodold = ctildeold * rhobar
        betad = - stildeold * betad + ctildeold * betahat

        # betad   = betad_k here.
        # rhodold = rhod_k  here.

        tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
        taud = (zeta - thetatilde * tautildeold) / rhodold
        d = d + betacheck * betacheck
        normr = sqrt(d + (betad - taud)**2 + betadd * betadd)

        # Estimate ||A||.
        normA2 = normA2 + beta * beta
        normA = sqrt(normA2)
        normA2 = normA2 + alpha * alpha

        # Estimate cond(A).
        maxrbar = max(maxrbar, rhobarold)
        if itn > 1:
            minrbar = min(minrbar, rhobarold)
        condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)

        # Test for convergence.

        # Compute norms for convergence testing.
        normar = abs(zetabar)
        normx = norm(x)

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.

        test1 = normr / normb
        if (normA * normr) != 0:
            test2 = normar / (normA * normr)
        else:
            test2 = infty
        test3 = 1 / condA
        t1 = test1 / (1 + normA * normx / normb)
        rtol = btol + atol * normA * normx / normb

        # The following tests guard against extremely small values of
        # atol, btol or ctol.  (The user may have set any or all of
        # the parameters atol, btol, conlim  to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.

        if itn >= maxiter:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.

        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.

        if show:
            if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
               (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
               (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
               (istop != 0):

                if pcount >= pfreq:
                    pcount = 0
                    print(' ')
                    print(hdg1, hdg2)
                pcount = pcount + 1
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (normr, normar)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (normA, condA)
                print(''.join([str1, str2, str3, str4]))

        if istop > 0:
            break

    # Print the stopping condition.

    if show:
        print(' ')
        print('LSMR finished')
        print(msg[istop])
        print('istop =%8g    normr =%8.1e' % (istop, normr))
        print('    normA =%8.1e    normAr =%8.1e' % (normA, normar))
        print('itn   =%8g    condA =%8.1e' % (itn, condA))
        print('    normx =%8.1e' % (normx))
        print(str1, str2)
        print(str3, str4)

    return x, istop, itn, normr, normar, normA, condA, normx
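A short usage sketch for the solver above (illustrative data; assumes NumPy
and SciPy's sparse module are available):

import numpy as np
from scipy.sparse import random as sparse_random

rng = np.random.default_rng(0)
A = sparse_random(100, 30, density=0.2, random_state=0)
b = rng.standard_normal(100)

# Solve the damped least-squares problem min ||b - Ax||^2 + damp^2 ||x||^2.
x, istop, itn, normr, normar, normA, condA, normx = lsmr(
    A, b, damp=1e-3, atol=1e-8, btol=1e-8, show=False)
print(istop, itn, normr)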
Example #40
    def setup_method(self):
        self.cases = []

        def make_cases(original, dtype):
            cases = []

            cases.append((matrix(original, dtype=dtype), original))
            cases.append((np.array(original, dtype=dtype), original))
            cases.append((sparse.csr_matrix(original, dtype=dtype), original))

            # Test default implementations of _adjoint and _rmatvec, which
            # refer to each other.
            def mv(x, dtype):
                y = original.dot(x)
                if len(x.shape) == 2:
                    y = y.reshape(-1, 1)
                return y

            def rmv(x, dtype):
                return original.T.conj().dot(x)

            class BaseMatlike(interface.LinearOperator):
                args = ()

                def __init__(self, dtype):
                    self.dtype = np.dtype(dtype)
                    self.shape = original.shape

                def _matvec(self, x):
                    return mv(x, self.dtype)

            class HasRmatvec(BaseMatlike):
                args = ()

                def _rmatvec(self, x):
                    return rmv(x, self.dtype)

            class HasAdjoint(BaseMatlike):
                args = ()

                def _adjoint(self):
                    shape = self.shape[1], self.shape[0]
                    matvec = partial(rmv, dtype=self.dtype)
                    rmatvec = partial(mv, dtype=self.dtype)
                    return interface.LinearOperator(matvec=matvec,
                                                    rmatvec=rmatvec,
                                                    dtype=self.dtype,
                                                    shape=shape)

            class HasRmatmat(HasRmatvec):
                def _matmat(self, x):
                    return original.dot(x)

                def _rmatmat(self, x):
                    return original.T.conj().dot(x)

            cases.append((HasRmatvec(dtype), original))
            cases.append((HasAdjoint(dtype), original))
            cases.append((HasRmatmat(dtype), original))
            return cases

        original = np.array([[1, 2, 3], [4, 5, 6]])
        self.cases += make_cases(original, np.int32)
        self.cases += make_cases(original, np.float32)
        self.cases += make_cases(original, np.float64)
        self.cases += [(interface.aslinearoperator(M).T, A.T)
                       for M, A in make_cases(original.T, np.float64)]
        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                       for M, A in make_cases(original.T, np.float64)]

        original = np.array([[1, 2j, 3j], [4j, 5j, 6]])
        self.cases += make_cases(original, np.complex128)
        self.cases += [(interface.aslinearoperator(M).T, A.T)
                       for M, A in make_cases(original.T, np.complex128)]
        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                       for M, A in make_cases(original.T, np.complex128)]
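A small sketch of the behavior the comment above tests: with the default
implementations, an operator that defines only `_matvec` and `_rmatvec`
still gets a working adjoint via `.H` (illustrative; uses the public
scipy.sparse.linalg.LinearOperator):

import numpy as np
from scipy.sparse.linalg import LinearOperator

class MatvecOnly(LinearOperator):
    def __init__(self):
        super().__init__(dtype=np.float64, shape=(2, 3))

    def _matvec(self, x):
        return np.array([[1., 2., 3.], [4., 5., 6.]]) @ x

    def _rmatvec(self, x):
        return np.array([[1., 2., 3.], [4., 5., 6.]]).T @ x

op = MatvecOnly()
print(op.H.matvec(np.array([1., 2.])))   # adjoint synthesized from _rmatvec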
Example #41
def make_system(A, M, x0, b, xtype=None):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, None}
        initial guess to iterative method
    b : array_like
        right hand side
    xtype : {'f', 'd', 'F', 'D', None}
        dtype of the x vector

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' %
                         (A.shape, ))

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N, 1) or b.shape == (N, )):
        raise ValueError('A and b have incompatible dimensions')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        if isinstance(b, matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)

    if xtype is None:
        if hasattr(A, 'dtype'):
            xtype = A.dtype.char
        else:
            xtype = A.matvec(b).dtype.char
        xtype = coerce(xtype, b.dtype.char)
    else:
        warn(
            'Use of xtype argument is deprecated. '
            'Use LinearOperator( ... , dtype=xtype) instead.',
            DeprecationWarning)
        if xtype == 0:
            xtype = b.dtype.char
        else:
            if xtype not in 'fdFD':
                raise ValueError("xtype must be 'f', 'd', 'F', or 'D'")

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N, )):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()

    # process preconditioner
    if M is None:
        id = lambda x: x  # identity solve: no preconditioning
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape,
                               matvec=psolve,
                               rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    return A, M, x, b, postprocess
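A sketch of how a Krylov driver typically consumes make_system (illustrative
values; the identity preconditioner comes back when M is None):

import numpy as np
from scipy.sparse import identity

A0 = 4.0 * identity(5, format='csr')
b0 = np.ones(5)

A, M, x, b, postprocess = make_system(A0, None, None, b0)
r = b - A.matvec(x)            # initial residual for the zero initial guess
z = M.matvec(r)                # apply the (identity) preconditioner
print(postprocess(x).shape)    # solution restored to b's original shape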
Example #42
def cgnr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
         callback=None, residuals=None):
    '''Conjugate Gradient, Normal Residual algorithm

    Applies CG to the normal equations, A.H A x = b. Left preconditioning
    is supported.  Note that unless A is well-conditioned, the use of
    CGNR is inadvisable

    Parameters
    ----------
    A : {array, matrix, sparse matrix, LinearOperator}
        n x n, linear system to solve
    b : {array, matrix}
        right hand side, shape is (n,) or (n,1)
    x0 : {array, matrix}
        initial guess, default is a vector of zeros
    tol : float
        relative convergence tolerance, i.e. tol is scaled by ||r_0||_2
    maxiter : int
        maximum number of allowed iterations
    xtype : type
        dtype for the solution, default is automatic type detection
    M : {array, matrix, sparse matrix, LinearOperator}
        n x n, inverted preconditioner, i.e. solve M A.H A x = b.
    callback : function
        User-supplied function is called after each iteration as
        callback(xk), where xk is the current solution vector
    residuals : list
        residuals has the residual norm history,
        including the initial residual, appended to it

    Returns
    -------
    (xNew, info)
    xNew : an updated guess to the solution of Ax = b
    info : halting status of cgnr

            ==  =======================================
            0   successful exit
            >0  convergence to tolerance not achieved,
                return iteration count instead.
            <0  numerical breakdown, or illegal input
            ==  =======================================


    Notes
    -----
    The LinearOperator class is in scipy.sparse.linalg.interface.
    Use this class if you prefer to define A or M as a mat-vec routine
    as opposed to explicitly constructing the matrix.  A.psolve(..) is
    still supported as a legacy.

    Examples
    --------
    >>> from pyamg.krylov.cgnr import cgnr
    >>> from pyamg.util.linalg import norm
    >>> import numpy as np
    >>> from pyamg.gallery import poisson
    >>> A = poisson((10,10))
    >>> b = np.ones((A.shape[0],))
    >>> (x,flag) = cgnr(A,b, maxiter=2, tol=1e-8)
    >>> print(norm(b - A*x))
    9.3910201849

    References
    ----------
    .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
       Second Edition", SIAM, pp. 276-7, 2003
       http://www-users.cs.umn.edu/~saad/books.html

    '''

    # Store the conjugate transpose explicitly as it will be used much later on
    if isspmatrix(A):
        AH = A.H
    else:
        # TODO avoid doing this since A may be a different sparse type
        AH = aslinearoperator(asmatrix(A).H)

    # Convert inputs to linear system, with error checking
    A, M, x, b, postprocess = make_system(A, M, x0, b)
    dimen = A.shape[0]

    # Ensure that warnings are always reissued from this function
    import warnings
    warnings.filterwarnings('always', module=r'pyamg\.krylov\._cgnr')

    # Choose type
    if not hasattr(A, 'dtype'):
        Atype = upcast(x.dtype, b.dtype)
    else:
        Atype = A.dtype
    if not hasattr(M, 'dtype'):
        Mtype = upcast(x.dtype, b.dtype)
    else:
        Mtype = M.dtype
    xtype = upcast(Atype, x.dtype, b.dtype, Mtype)

    # Should norm(r) be kept
    if residuals == []:
        keep_r = True
    else:
        keep_r = False

    # How often should r be recomputed
    recompute_r = 8

    # Check iteration numbers. CGNR suffers from loss of orthogonality quite
    # easily, so we arbitrarily let the method go up to 130% over the
    # theoretically necessary limit of maxiter=dimen
    if maxiter is None:
        maxiter = int(ceil(1.3*dimen)) + 2
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')
    elif maxiter > (1.3*dimen):
        warn('maxiter exceeds 130% of the number of dofs; '
             'resetting it to that limit')
        maxiter = int(ceil(1.3*dimen)) + 2

    # Prep for method
    r = b - A*x
    rhat = AH*r
    normr = norm(r)
    if keep_r:
        residuals.append(normr)

    # Check initial guess ( scaling by b, if b != 0,
    #   must account for case when norm(b) is very small)
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol*normb:
        if callback is not None:
            callback(x)
        return (postprocess(x), 0)

    # Scale tol by ||r_0||_2
    if normr != 0.0:
        tol = tol*normr

    # Begin CGNR

    # Apply preconditioner and calculate initial search direction
    z = M*rhat
    p = z.copy()
    old_zr = inner(z.conjugate(), rhat)

    for iter in range(maxiter):

        # w_j = A p_j
        w = A*p

        # alpha = (z_j, rhat_j) / (w_j, w_j)
        alpha = old_zr / inner(w.conjugate(), w)

        # x_{j+1} = x_j + alpha*p_j
        x += alpha*p

        # r_{j+1} = r_j - alpha*w_j
        if mod(iter, recompute_r) and iter > 0:
            r -= alpha*w
        else:
            r = b - A*x

        # rhat_{j+1} = A.H*r_{j+1}
        rhat = AH*r

        # z_{j+1} = M*r_{j+1}
        z = M*rhat

        # beta = (z_{j+1}, rhat_{j+1}) / (z_j, rhat_j)
        new_zr = inner(z.conjugate(), rhat)
        beta = new_zr / old_zr
        old_zr = new_zr

        # p_{j+1} = A.H*z_{j+1} + beta*p_j
        p *= beta
        p += z

        # Allow user access to residual
        if callback is not None:
            callback(x)

        # test for convergence
        normr = norm(r)
        if keep_r:
            residuals.append(normr)
        if normr < tol:
            return (postprocess(x), 0)

    # end loop

    return (postprocess(x), iter+1)
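A short sketch of collecting the residual history through the `residuals`
argument (building on the doctest above; an empty list must be passed in for
the history to be recorded):

import numpy as np
from pyamg.gallery import poisson

A = poisson((10, 10))
b = np.ones(A.shape[0])

res = []                             # filled with ||r|| at each iteration
x, flag = cgnr(A, b, tol=1e-8, maxiter=50, residuals=res)
print(flag, len(res), res[0], res[-1])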
Example #43
 def assertCompatibleSystem(self, A, xtrue):
     Afun = aslinearoperator(A)
     b = Afun.matvec(xtrue)
     x = lsmr(A,b)[0]
     assert_almost_equal(norm(x - xtrue), 0, 6)
Example #44
def eigen_symmetric(A, k=6, M=None, sigma=None, which='LM', v0=None,
                    ncv=None, maxiter=None, tol=0,
                    return_eigenvectors=True):
    """Find k eigenvalues and eigenvectors of the real symmetric
    square matrix A.

    Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].


    Parameters
    ----------
    A : matrix or array with real entries or object with matvec(x) method
        An N x N real symmetric matrix or array or an object with matvec(x)
        method to perform the matrix vector product A * x.  The sparse
        matrix formats in scipy.sparse are appropriate for A.

    k : integer
        The number of eigenvalues and eigenvectors desired

    Returns
    -------
    w : array
        Array of k eigenvalues

    v : array
       An array of k eigenvectors
       v[i] is the eigenvector corresponding to the eigenvalue w[i]

    Other Parameters
    ----------------
    M : matrix or array
        (Not implemented)
        A symmetric positive-definite matrix for the generalized
        eigenvalue problem A * x = w * M * x


    sigma : real
        (Not implemented)
        Find eigenvalues near sigma.  Shift spectrum by sigma.

    v0 : array
        Starting vector for iteration.

    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k; it is recommended that ncv > 2*k

    which : string
        Which k eigenvectors and eigenvalues to find:
         - 'LA' : Largest (algebraic) eigenvalues
         - 'SA' : Smallest (algebraic) eigenvalues
         - 'LM' : Largest (in magnitude) eigenvalues
         - 'SM' : Smallest (in magnitude) eigenvalues
         - 'BE' : Half (k/2) from each end of the spectrum
                  When k is odd, return one more (k/2+1) from the high end

    maxiter : integer
        Maximum number of Arnoldi update iterations allowed

    tol : float
        Relative accuracy for eigenvalues (stopping criterion)

    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues

    See Also
    --------
    eigen : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A

    Notes
    -----

    Examples
    --------
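    A minimal sketch (illustrative; assumes a SciPy version that provides
    this routine):

    >>> import numpy as np
    >>> from scipy.sparse import spdiags
    >>> data = np.arange(1.0, 11.0).reshape(1, -1)
    >>> A = spdiags(data, [0], 10, 10)    # diagonal, hence real symmetric
    >>> w, v = eigen_symmetric(A, k=3, which='LA')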
    """
    A = aslinearoperator(A)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    n = A.shape[0]

    # guess type
    typ = A.dtype.char
    if typ not in 'fd':
        raise ValueError("matrix must be real valued (type must be 'f' or 'd')")

    if M is not None:
        raise NotImplementedError("generalized eigenproblem not supported yet")
    if sigma is not None:
        raise NotImplementedError("shifted eigenproblem not supported yet")

    if ncv is None:
        ncv = 2 * k + 1
    ncv = min(ncv, n)
    if maxiter is None:
        maxiter = n * 10
    # assign starting vector
    if v0 is not None:
        resid = v0
        info = 1
    else:
        resid = np.zeros(n, typ)
        info = 0

    # some sanity checks
    if k <= 0:
        raise ValueError("k must be positive, k=%d"%k)
    if k == n:
        raise ValueError("k must be less than rank(A), k=%d"%k)
    if maxiter <= 0:
        raise ValueError("maxiter must be positive, maxiter=%d"%maxiter)
    whiches=['LM','SM','LA','SA','BE']
    if which not in whiches:
        raise ValueError("which must be one of %s"%' '.join(whiches))
    if ncv > n or ncv < k:
        raise ValueError("ncv must be k<=ncv<=n, ncv=%s"%ncv)

    # assign solver and postprocessor
    ltr = _type_conv[typ]
    eigsolver = _arpack.__dict__[ltr+'saupd']
    eigextract = _arpack.__dict__[ltr+'seupd']

    # set output arrays, parameters, and workspace
    v = np.zeros((n,ncv),typ)
    workd = np.zeros(3*n,typ)
    workl = np.zeros(ncv*(ncv+8),typ)
    iparam = np.zeros(11,'int')
    ipntr = np.zeros(11,'int')
    ido = 0

    # set solver mode and parameters
    # only supported mode is 1: Ax=lx
    ishfts = 1
    mode1 = 1
    bmat='I'
    iparam[0] = ishfts
    iparam[2] = maxiter
    iparam[6] = mode1

    while True:
        ido,resid,v,iparam,ipntr,info =\
            eigsolver(ido,bmat,which,k,tol,resid,v,
                      iparam,ipntr,workd,workl,info)

        xslice = slice(ipntr[0]-1, ipntr[0]-1+n)
        yslice = slice(ipntr[1]-1, ipntr[1]-1+n)
        if ido == -1:
            # initialization
            workd[yslice]=A.matvec(workd[xslice])
        elif ido == 1:
            # compute y=Ax
            workd[yslice]=A.matvec(workd[xslice])
        else:
            break

    if info < -1:
        raise RuntimeError("Error info=%d in arpack" % info)

    if info == 1:
        warnings.warn("Maximum number of iterations taken: %s" % iparam[2])

    if iparam[4] < k:
        warnings.warn("Only %d/%d eigenvectors converged" % (iparam[4], k))

    # now extract eigenvalues and (optionally) eigenvectors
    rvec = return_eigenvectors
    ierr = 0
    howmny = 'A' # return all eigenvectors
    sselect = np.zeros(ncv,'int') # unused
    sigma = 0.0 # no shifts, not implemented

    d,z,info =\
             eigextract(rvec,howmny,sselect,sigma,
                        bmat,which, k,tol,resid,v,iparam[0:7],ipntr,
                        workd[0:2*n],workl,ierr)

    if ierr != 0:
        raise RuntimeError("Error info=%d in arpack" % info)
    if return_eigenvectors:
        return d,z
    return d
Example #45
 def testComplexB(self):
     A = 4 * eye(self.n) + ones((self.n, self.n))
     xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
     b = aslinearoperator(A).matvec(xtrue)
     x = lsmr(A, b)[0]
     assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
Example #46
def lad(A,
        b,
        rho,
        alpha=1.3,
        verbose=True,
        ret_hist=False,
        abstol=1e-1,
        reltol=1e-2,
        no_iter=30,
        lsmr_atol=1e-6,
        lsmr_btol=1e-6,
        lsmr_conlim=1e8,
        lsmr_iter=1000):
    """
    A port of the demonstration by Boyd, et al. in Matlab which was modified to
    to use LSMR for the x-step.  Solves the following problem via ADMM:

       ``minimize     ||Ax - b||_1``

    More information can be found in the paper linked at:
    http://www.stanford.edu/~boyd/papers/distr_opt_stat_learning_admm.html

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Representation of an m-by-n matrix.  It is required that
        the linear operator can produce ``Ax`` and ``A^T x``.
    b : array_like, shape (m,)
        Right-hand side vector ``b``.
    rho : scalar
        The augmented Lagrangian parameter, which will need to be set based
        upon the application.  Typical values have been around 0.05.
    alpha : scalar
        The over-relaxation parameter, which should be above 1.0 and is
        typically below 1.8.
    verbose : bool
        Print out the progress per iteration
    ret_hist : bool
        Return the history of ADMM and LSMR per iteration.
    abstol : scalar
        Absolute tolerance for the ADMM stopping criterion.
    reltol : scalar
        Relative tolerance for the ADMM stopping criterion.
    no_iter : int
        Maximum number of iterations of ADMM to be run.
    lsmr_atol : scalar
        The relative tolerance for ``A`` in the LSMR algorithm.
    lsmr_btol : scalar
        The relative tolerance for ``b`` in the LSMR algorithm.
    lsmr_conlim : scalar
        The condition limit for ``A`` in the LSMR algorithm.
    lsmr_iter : int
        Maximum number of iterations of LSMR to be run each ADMM iteration.

    Returns
    -------
    x : array_like, shape (n,)
        The L1 norm estimate of ``x`` in ``Ax = b``

    """
    A = aslinearoperator(A)
    b = np.asarray(b)

    m = A.shape[0]
    n = A.shape[1]

    x = np.zeros(n)
    z = np.zeros(m)
    u = np.zeros(m)
    Ax = np.zeros(m)

    if ret_hist:
        history = dict()
        history['x'] = np.zeros((no_iter, n))
        history['r_norm'] = np.zeros(no_iter)
        history['s_norm'] = np.zeros(no_iter)
        history['eps_prim'] = np.zeros(no_iter)
        history['eps_dual'] = np.zeros(no_iter)
        history['objective'] = np.zeros(no_iter)
        history['lsmr'] = dict()
        history['lsmr']['flag'] = np.zeros(no_iter)
        history['lsmr']['iter'] = np.zeros(no_iter)
        history['lsmr']['normr'] = np.zeros(no_iter)
        history['lsmr']['normar'] = np.zeros(no_iter)
        history['lsmr']['norma'] = np.zeros(no_iter)
        history['lsmr']['conda'] = np.zeros(no_iter)
        history['lsmr']['normx'] = np.zeros(no_iter)

    if verbose:
        print('%4s\t%10s\t%10s\t%10s\t%10s\t%10s' % (
            'iter', 'r norm', 'eps pri', 's norm', 'eps dual', 'objective'))

    for iter_no in range(no_iter):
        q = b + z - u
        # Perform LSMR on the residual and add it to x as suggested by scipy
        # https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.sparse.linalg.lsqr.html
        # to perform a warm-start on the algorithm.
        # TODO: Warm start was added in v1.0, so change this to use that
        # feature
        r0 = q - Ax
        # Bind the outputs to new names so that lsmr_iter keeps its meaning
        # as the maxiter argument on subsequent ADMM iterations.
        (dx, lsmr_flag, lsmr_itn, lsmr_normr, lsmr_normar, lsmr_norma,
             lsmr_conda, lsmr_normx) = \
             lsmr(A, r0, 0, lsmr_atol, lsmr_btol, lsmr_conlim, lsmr_iter)
        x += dx

        if ret_hist:
            history['x'][iter_no, :] = x.copy()
            history['lsmr']['flag'][iter_no] = lsmr_flag
            history['lsmr']['iter'][iter_no] = lsmr_itn
            history['lsmr']['normr'][iter_no] = lsmr_normr
            history['lsmr']['normar'][iter_no] = lsmr_normar
            history['lsmr']['norma'][iter_no] = lsmr_norma
            history['lsmr']['conda'][iter_no] = lsmr_conda
            history['lsmr']['normx'][iter_no] = lsmr_normx

        zold = z
        Ax = A.matvec(x)
        Ax_hat = alpha * Ax + (1 - alpha) * (zold + b)
        z = _shrinkage(Ax_hat - b + u, 1 / rho)

        u = u + (Ax_hat - z - b)

        # Calculate the iteration progress
        objval = np.linalg.norm(z, 1)
        r_norm = np.linalg.norm(Ax - z - b)
        s_norm = np.linalg.norm(rho * A.rmatvec(zold - z))

        # And the stopping criterion
        eps_prim = abstol * np.sqrt(n) + \
                   reltol * np.max((np.linalg.norm(Ax),
                                     np.linalg.norm(-z),
                                     np.linalg.norm(b)))
        eps_dual = abstol * np.sqrt(n) + \
                   reltol * np.linalg.norm(rho * A.rmatvec(u))

        if ret_hist:
            history['r_norm'][iter_no] = r_norm
            history['s_norm'][iter_no] = s_norm
            history['eps_prim'][iter_no] = eps_prim
            history['eps_dual'][iter_no] = eps_dual
            history['objective'][iter_no] = objval

        if verbose:
            print('%4d\t%10.4e\t%10.4e\t%10.4e\t%10.4e\t%10.4e' % (
                iter_no, r_norm, eps_prim, s_norm, eps_dual, objval))

        if (r_norm < eps_prim) and (s_norm < eps_dual):
            break

    if ret_hist:
        return x, history
    else:
        return x
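The z-update above relies on a helper `_shrinkage` that is not shown in this
excerpt. A minimal sketch of the standard ADMM soft-thresholding operator it
presumably implements:

import numpy as np

def _shrinkage(v, kappa):
    # Elementwise soft threshold: S_kappa(v) = max(0, v - kappa) - max(0, -v - kappa)
    return np.maximum(0.0, v - kappa) - np.maximum(0.0, -v - kappa)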
Example #47
def MinresQLP(A,
              b,
              rtol,
              maxit,
              M=None,
              shift=None,
              maxxnorm=None,
              Acondlim=None,
              TranCond=None,
              show=False,
              rnormvec=False):

    A = aslinearoperator(A)
    if shift is None:
        shift = 0
    if maxxnorm is None:
        maxxnorm = 1e7
    if Acondlim is None:
        Acondlim = 1e15
    if TranCond is None:
        TranCond = 1e7
    if rnormvec:
        resvec = []
        Aresvec = []

    n = len(b)
    b = b.reshape(n, 1)
    r2 = b
    r3 = r2
    beta1 = norm(r2)

    if M is None:
        noprecon = True
        pass
    else:
        noprecon = False
        r3 = Precond(M, r2)
        beta1 = r3.T.dot(r2)  # beta1 = r3' * r2
        if beta1 < 0:
            print('Error: "M" is indefinite!')
        else:
            beta1 = np.sqrt(beta1)

    ## Initialize
    flag0 = -2
    flag = -2
    iters = 0
    QLPiter = 0
    beta = 0
    tau = 0
    taul = 0
    phi = beta1
    betan = beta1
    gmin = 0
    cs = -1
    sn = 0
    cr1 = -1
    sr1 = 0
    cr2 = -1
    sr2 = 0
    dltan = 0
    eplnn = 0
    gama = 0
    gamal = 0
    gamal2 = 0
    eta = 0
    etal = 0
    etal2 = 0
    vepln = 0
    veplnl = 0
    veplnl2 = 0
    ul3 = 0
    ul2 = 0
    ul = 0
    u = 0
    rnorm = betan
    xnorm = 0
    xl2norm = 0
    Axnorm = 0
    Anorm = 0
    Acond = 1
    relres = rnorm / (beta1 + 1e-50)
    x = np.zeros((n, 1))
    w = np.zeros((n, 1))
    wl = np.zeros((n, 1))
    if rnormvec:
        resvec = np.append(resvec, beta1)

    msg = [
        ' beta2 = 0.  b and x are eigenvectors                   ',  # -1
        ' beta1 = 0.  The exact solution is  x = 0               ',  # 0
        ' A solution to Ax = b found, given rtol                 ',  # 1
        ' Min-length solution for singular LS problem, given rtol',  # 2
        ' A solution to Ax = b found, given eps                  ',  # 3
        ' Min-length solution for singular LS problem, given eps ',  # 4
        ' x has converged to an eigenvector                      ',  # 5
        ' xnorm has exceeded maxxnorm                            ',  # 6
        ' Acond has exceeded Acondlim                            ',  # 7
        ' The iteration limit was reached                        ',  # 8
        ' Least-squares problem but no converged solution yet    '
    ]  # 9

    if show:
        print(' ')
        print('Enter Minres-QLP: ')
        print('Min-length solution of symmetric(singular)', end=' ')
        print('(A-sI)x = b or min ||(A-sI)x - b||')
        #||Ax - b|| is ||(A-sI)x - b|| if shift != 0 here
        hstr1 = '    n = %8g    ||Ax - b|| = %8.2e     ' % (n, beta1)
        hstr2 = 'shift = %8.2e       rtol = %8g' % (shift, rtol)
        hstr3 = 'maxit = %8g      maxxnorm = %8.2e  ' % (maxit, maxxnorm)
        hstr4 = 'Acondlim = %8.2e   TranCond = %8g' % (Acondlim, TranCond)
        print(hstr1, hstr2)
        print(hstr3, hstr4)

    #b = 0 --> x = 0 skip the main loop
    if beta1 == 0:
        flag = 0

    while flag == flag0 and iters < maxit:
        #lanczos
        iters += 1
        betal = beta
        beta = betan
        v = r3 / beta
        r3 = Ax(A, v)
        if shift == 0:
            pass
        else:
            r3 = r3 - shift * v

        if iters > 1:
            r3 = r3 - r1 * beta / betal

        alfa = np.real(r3.T.dot(v))
        r3 = r3 - r2 * alfa / beta
        r1 = r2
        r2 = r3

        if noprecon:
            betan = norm(r3)
            if iters == 1:
                if betan == 0:
                    if alfa == 0:
                        flag = 0
                        break
                    else:
                        flag = -1
                        x = b / alfa
                        break
        else:
            r3 = Precond(M, r2)
            betan = r2.T.dot(r3)
            if betan > 0:
                betan = np.sqrt(betan)
            else:
                print('Error: "M" is indefinite or singular!')
        pnorm = np.sqrt(betal**2 + alfa**2 + betan**2)

        #previous left rotation Q_{k-1}
        dbar = dltan
        dlta = cs * dbar + sn * alfa
        epln = eplnn
        gbar = sn * dbar - cs * alfa
        eplnn = sn * betan
        dltan = -cs * betan
        dlta_QLP = dlta
        #current left plane rotation Q_k
        gamal3 = gamal2
        gamal2 = gamal
        gamal = gama
        cs, sn, gama = SymGivens(gbar, betan)
        gama_tmp = gama
        taul2 = taul
        taul = tau
        tau = cs * phi
        Axnorm = np.sqrt(Axnorm**2 + tau**2)
        phi = sn * phi
        #previous right plane rotation P_{k-2,k}
        if iters > 2:
            veplnl2 = veplnl
            etal2 = etal
            etal = eta
            dlta_tmp = sr2 * vepln - cr2 * dlta
            veplnl = cr2 * vepln + sr2 * dlta
            dlta = dlta_tmp
            eta = sr2 * gama
            gama = -cr2 * gama
        #current right plane rotation P{k-1,k}
        if iters > 1:
            cr1, sr1, gamal = SymGivens(gamal, dlta)
            vepln = sr1 * gama
            gama = -cr1 * gama

        #update xnorm
        xnorml = xnorm
        ul4 = ul3
        ul3 = ul2
        if iters > 2:
            ul2 = (taul2 - etal2 * ul4 - veplnl2 * ul3) / gamal2
        if iters > 1:
            ul = (taul - etal * ul3 - veplnl * ul2) / gamal
        xnorm_tmp = np.sqrt(xl2norm**2 + ul2**2 + ul**2)
        if abs(gama) > np.finfo(np.double).tiny and xnorm_tmp < maxxnorm:
            u = (tau - eta * ul2 - vepln * ul) / gama
            if np.sqrt(xnorm_tmp**2 + u**2) > maxxnorm:
                u = 0
                flag = 6
        else:
            u = 0
            flag = 9
        xl2norm = np.sqrt(xl2norm**2 + ul2**2)
        xnorm = np.sqrt(xl2norm**2 + ul**2 + u**2)
        #update w&x
        #Minres
        if (Acond < TranCond) and flag != flag0 and QLPiter == 0:
            wl2 = wl
            wl = w
            w = (v - epln * wl2 - dlta_QLP * wl) / gama_tmp
            if xnorm < maxxnorm:
                x += tau * w
            else:
                flag = 6
        #Minres-QLP
        else:
            QLPiter += 1
            if QLPiter == 1:
                xl2 = np.zeros((n, 1))
                if (iters > 1):  # construct w_{k-3}, w_{k-2}, w_{k-1}
                    if iters > 3:
                        wl2 = gamal3 * wl2 + veplnl2 * wl + etal * w
                    if iters > 2:
                        wl = gamal_QLP * wl + vepln_QLP * w
                    w = gama_QLP * w
                    xl2 = x - wl * ul_QLP - w * u_QLP

            if iters == 1:
                wl2 = wl
                wl = v * sr1
                w = -v * cr1
            elif iters == 2:
                wl2 = wl
                wl = w * cr1 + v * sr1
                w = w * sr1 - v * cr1
            else:
                wl2 = wl
                wl = w
                w = wl2 * sr2 - v * cr2
                wl2 = wl2 * cr2 + v * sr2
                v = wl * cr1 + w * sr1
                w = wl * sr1 - w * cr1
                wl = v
            xl2 = xl2 + wl2 * ul2
            x = xl2 + wl * ul + w * u

        #next right plane rotation P{k-1,k+1}
        gamal_tmp = gamal
        cr2, sr2, gamal = SymGivens(gamal, eplnn)
        #transfering from Minres to Minres-QLP
        gamal_QLP = gamal_tmp
        #print('gamal_QLP=', gamal_QLP)
        vepln_QLP = vepln
        gama_QLP = gama
        ul_QLP = ul
        u_QLP = u
        ## Estimate various norms
        abs_gama = abs(gama)
        Anorml = Anorm
        Anorm = max([Anorm, pnorm, gamal, abs_gama])
        if iters == 1:
            gmin = gama
            gminl = gmin
        elif iters > 1:
            gminl2 = gminl
            gminl = gmin
            gmin = min([gminl2, gamal, abs_gama])
        Acondl = Acond
        Acond = Anorm / gmin
        rnorml = rnorm
        relresl = relres
        if flag != 9:
            rnorm = phi
        relres = rnorm / (Anorm * xnorm + beta1)
        rootl = np.sqrt(gbar**2 + dltan**2)
        Arnorml = rnorml * rootl
        relAresl = rootl / Anorm
        ## See if any of the stopping criteria are satisfied.
        epsx = Anorm * xnorm * np.finfo(float).eps
        if (flag == flag0) or (flag == 9):
            t1 = 1 + relres
            t2 = 1 + relAresl
            if iters >= maxit:
                flag = 8  #exit before maxit
            if Acond >= Acondlim:
                flag = 7  #Huge Acond
            if xnorm >= maxxnorm:
                flag = 6  #xnorm exceeded
            if epsx >= beta1:
                flag = 5  #x = eigenvector
            if t2 <= 1:
                flag = 4  #Accurate Least Square Solution
            if t1 <= 1:
                flag = 3  #Accurate Ax = b Solution
            if relAresl <= rtol:
                flag = 2  #Trustful Least Square Solution
            if relres <= rtol:
                flag = 1  #Trustful Ax = b Solution
        if flag == 2 or flag == 4 or flag == 6 or flag == 7:
            #possibly singular
            iters = iters - 1
            Acond = Acondl
            rnorm = rnorml
            relres = relresl
        else:
            if rnormvec:
                resvec = np.append(resvec, rnorm)
                Aresvec = np.append(Aresvec, Arnorml)

            if show:
                if iters % 10 - 1 == 0:
                    lstr = ('        iter     rnorm    Arnorm    relres   ' +
                            'relAres    Anorm     Acond     xnorm')
                    print(' ')
                    print(lstr)
                if QLPiter == 1:
                    print('QLP', end='')
                else:
                    print('   ', end='')
                lstr1 = '%8g    %8.2e ' % (iters - 1, rnorml)
                lstr2 = '%8.2e  %8.2e ' % (Arnorml, relresl)
                lstr3 = '%8.2e  %8.2e ' % (relAresl, Anorml)
                lstr4 = '%8.2e  %8.2e ' % (Acondl, xnorml)
                print(lstr1, lstr2, lstr3, lstr4)

    #exited the main loop
    if QLPiter == 1:
        print('QLP', end='')
    else:
        print('   ', end='')
    Miter = iters - QLPiter

    #final quantities
    r1 = b - Ax(A, x) + shift * x
    rnorm = norm(r1)
    Arnorm = norm(Ax(A, r1) - shift * r1)
    xnorm = norm(x)
    relres = rnorm / (Anorm * xnorm + beta1)
    relAres = 0
    if rnorm > np.finfo(np.double).tiny:
        relAres = Arnorm / (Anorm * rnorm)

    if show:
        if rnorm > np.finfo(np.double).tiny:
            lstr1 = '%8g    %8.2e ' % (iters, rnorm)
            lstr2 = '%8.2eD %8.2e ' % (Arnorm, relres)
            lstr3 = '%8.2eD %8.2e ' % (relAres, Anorm)
            lstr4 = '%8.2e  %8.2e ' % (Acond, xnorm)
            print(lstr1, lstr2, lstr3, lstr4)
        else:
            lstr1 = '%8g    %8.2e ' % (iters, rnorm)
            lstr2 = '%8.2eD %8.2e ' % (Arnorm, relres)
            lstr3 = '          %8.2e ' % (Anorm)
            lstr4 = '%8.2e  %8.2e ' % (Acond, xnorm)
            print(lstr1, lstr2, lstr3, lstr4)

        print(' ')
        print('Exit Minres-QLP: ')
        str1 = 'Flag = %8g    %8s' % (flag, msg[int(flag + 1)])
        str2 = 'Iter = %8g      ' % (iters)
        str3 = 'Minres = %8g       Minres-QLP = %8g' % (Miter, QLPiter)
        str4 = 'relres = %8.2e    relAres = %8.2e    ' % (relres, relAres)
        str5 = 'rnorm = %8.2e      Arnorm = %8.2e' % (rnorm, Arnorm)
        str6 = 'Anorm = %8.2e       Acond = %8.2e    ' % (Anorm, Acond)
        str7 = 'xnorm = %8.2e      Axnorm = %8.2e' % (xnorm, Axnorm)
        print(str1)
        print(str2, str3)
        print(str4, str5)
        print(str6, str7)

    if rnormvec:
        Aresvec = np.append(Aresvec, Arnorm)
        return (x, flag, iters, Miter, QLPiter, relres, relAres, Anorm, Acond,
                xnorm, Axnorm, resvec, Aresvec)

    return (x, flag, iters, Miter, QLPiter, relres, relAres, Anorm, Acond,
            xnorm, Axnorm)
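MinresQLP calls three helpers that are not shown in this excerpt: Ax, Precond
and SymGivens. Minimal sketches consistent with how they are used above (the
Precond body is an assumption; a real preconditioner would typically solve
M z = r rather than multiply):

import numpy as np

def Ax(A, x):
    # Apply the operator, preserving the (n, 1) column shape used throughout.
    return A.matvec(x).reshape(-1, 1)

def Precond(M, r):
    # Assumption: M acts as (an approximation of) the inverse preconditioner.
    return aslinearoperator(M).matvec(r).reshape(-1, 1)

def SymGivens(a, b):
    # Stable symmetric Givens rotation, same contract as _sym_ortho above:
    # returns (c, s, r) with c*a + s*b = r and s*a - c*b = 0.
    if b == 0:
        return np.sign(a), 0.0, abs(a)
    if a == 0:
        return 0.0, np.sign(b), abs(b)
    if abs(b) > abs(a):
        tau = a / b
        s = np.sign(b) / np.sqrt(1 + tau * tau)
        return s * tau, s, b / s
    tau = b / a
    c = np.sign(a) / np.sqrt(1 + tau * tau)
    return c, c * tau, a / c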