Example #1
def test_normalization(dtype):
    atol = numpy.finfo(dtype).eps * 100
    n = 20
    x = generate_random_dtype_array([n], dtype)
    assert norm(x) > 1
    orthogonalization.normalize(x)
    assert_allclose(norm(x), 1, rtol=0, atol=atol)
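
A minimal sketch of the generate_random_dtype_array helper that these tests rely on; the name comes from the tests above, but this particular implementation is an assumption, not the library's actual code:

import numpy

def generate_random_dtype_array(shape, dtype):
    # Hypothetical helper: random array of the given shape, complex-valued
    # when the requested dtype is complex, real-valued otherwise.
    if numpy.dtype(dtype).kind == 'c':
        return (numpy.random.rand(*shape)
                + 1j * numpy.random.rand(*shape)).astype(dtype)
    return numpy.random.rand(*shape).astype(dtype)
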
Example #2
def test_orthogonalization(dtype, otype):
    atol = numpy.finfo(dtype).eps * 100
    n = 20
    x = generate_random_dtype_array([n], dtype)
    orthogonalization.normalize(x)

    y = generate_random_dtype_array([n], dtype)
    orthogonalization.orthogonalize(x, y, method=otype)
    assert_allclose(dot(x, y), 0, rtol=0, atol=atol)
Example #3
def jdqr(A,
         num=5,
         target=Target.SmallestMagnitude,
         tol=1e-8,
         M=None,
         prec=None,
         maxit=1000,
         subspace_dimensions=(20, 40),
         initial_subspace=None,
         arithmetic='real',
         return_eigenvectors=False,
         return_subspace=False,
         interface=None):

    if arithmetic not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real' or 'complex'")

    if not prec:
        prec = _prec

    solver_tolerance = 1.0

    n = A.shape[0]

    subspace_dimensions = (min(subspace_dimensions[0], n // 2),
                           min(subspace_dimensions[1], n))

    it = 1
    k = 0  # Number of eigenvalues found
    m = 0  # Size of the search subspace
    nev = 1  # Number of eigenvalues currently converging

    alpha = None
    evs = None

    dtype = A.dtype
    if interface:
        dtype = interface.dtype

    ctype = numpy.dtype(dtype.char.upper())
    if arithmetic in ['complex', 'c']:
        dtype = ctype

    if not interface:
        interface = NumPyInterface(n, dtype)

    extra = 0
    if dtype != ctype:
        # Allocate extra space in case a complex conjugate eigenpair occurs for a real matrix
        extra = 1

    # Eigenvalues
    aconv = numpy.zeros(num + extra, ctype)

    # Schur matrices
    R = numpy.zeros((num + extra, num + extra), dtype)

    # Schur vectors
    Q = interface.vector(num + extra)
    # Preconditioned Q
    Y = interface.vector(num + extra)
    H = numpy.zeros((num + extra, num + extra), dtype)

    MQ = Q
    if M is not None:
        MQ = interface.vector(num + extra)

    # Orthonormal search subspace
    V = interface.vector(subspace_dimensions[1])
    # AV = A*V without orthogonalization
    AV = interface.vector(subspace_dimensions[1])

    # MV = M*V without orthogonalization
    MV = None
    if M is not None:
        MV = interface.vector(subspace_dimensions[1])

    # Residual vector
    r = interface.vector(1 + extra)

    # Low-dimensional projection: VAV = V'*A*V
    VAV = numpy.zeros((subspace_dimensions[1], subspace_dimensions[1]), dtype)

    while k < num and it <= maxit:
        if it == 1:
            if initial_subspace is not None:
                nev = min(initial_subspace.shape[1], subspace_dimensions[1])
                V[:, 0:nev] = initial_subspace[:, 0:nev]
            else:
                V[:, 0] = interface.random()
                normalize(V[:, 0], M=M)
        else:
            solver_maxit = 100
            sigma = evs[0]

            # Build an initial search subspace in an inexpensive way
            # and as close to the target as possible
            if m < subspace_dimensions[0]:
                solver_tolerance = 0.5
                solver_maxit = 1
                if target != 0.0:
                    sigma = target

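            # Expand the search space with an approximate solution of the
            # Jacobi-Davidson correction equation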
            if M is not None:
                V[:, m:m + nev] = solve_generalized_correction_equation(
                    A, M, prec, MQ[:, 0:k + nev], Q[:, 0:k + nev],
                    Y[:, 0:k + nev], H[0:k + nev, 0:k + nev], sigma, 1.0,
                    r[:, 0:nev], solver_tolerance, solver_maxit, interface)
            else:
                V[:, m:m + nev] = solve_correction_equation(
                    A, prec, Q[:, 0:k + nev], Y[:, 0:k + nev],
                    H[0:k + nev, 0:k + nev], sigma, r[:, 0:nev],
                    solver_tolerance, solver_maxit, interface)

            orthonormalize(V[:, 0:m],
                           V[:, m:m + nev],
                           M=M,
                           MV=None if MV is None else MV[:, 0:m])

        AV[:, m:m + nev] = A @ V[:, m:m + nev]
        if M is not None:
            MV[:, m:m + nev] = M @ V[:, m:m + nev]

        # Update VAV = V' * A * V
        for i in range(m):
            VAV[i, m:m + nev] = dot(V[:, i], AV[:, m:m + nev])
            VAV[m:m + nev, i] = dot(V[:, m:m + nev], AV[:, i])
        VAV[m:m + nev, m:m + nev] = dot(V[:, m:m + nev], AV[:, m:m + nev])

        m += nev

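        # Schur decomposition of the projected matrix VAV = V' * A * V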
        [S, U] = schur(VAV[0:m, 0:m])

        found = True
        while found:
            [S, U] = schur_sort(S, U, target)

            nev = 1
            if dtype != ctype and S.shape[0] > 1 and abs(S[1, 0]) > 0.0:
                # Complex eigenvalue in real arithmetic
                nev = 2

            alpha = S[0:nev, 0:nev]

            evcond = norm(alpha)

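            # Approximate Schur vector(s) in the full space and their
            # preconditioned counterpart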
            Q[:, k:k + nev] = V[:, 0:m] @ U[:, 0:nev]
            Y[:, k:k + nev] = prec(Q[:, k:k + nev], alpha)

            if M is not None:
                MQ[:, k:k + nev] = MV[:, 0:m] @ U[:, 0:nev]

            for i in range(k):
                H[i, k:k + nev] = dot(MQ[:, i], Y[:, k:k + nev])
                H[k:k + nev, i] = dot(MQ[:, k:k + nev], Y[:, i])
            H[k:k + nev, k:k + nev] = dot(MQ[:, k:k + nev], Y[:, k:k + nev])

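            # Residual of the approximate Schur pair: r = A*q - M*q*alpha
            # (MQ equals Q when no mass matrix M is given)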
            r[:, 0:nev] = A @ Q[:, k:k + nev] - MQ[:, k:k + nev] @ alpha
            orthogonalize(MQ[:, 0:k + nev],
                          r[:, 0:nev],
                          M=None,
                          MV=Q[:, 0:k + nev])

            rnorm = norm(r[:, 0:nev]) / evcond

            evs = scipy.linalg.eigvals(alpha)
            ev_est = evs[0]
            print(
                "Step: %4d, subspace dimension: %3d, eigenvalue estimate: %13.6e + %13.6ei, residual norm: %e"
                % (it, m, ev_est.real, ev_est.imag, rnorm))
            sys.stdout.flush()

            # Store converged Ritz pairs
            if rnorm <= tol:
                # Compute R so we can compute the eigenvectors
                if return_eigenvectors:
                    if k > 0:
                        AQ = AV[:, 0:m] @ U[:, 0:nev]
                        for i in range(k):
                            R[i, k:k + nev] = dot(Q[:, i], AQ)
                    R[k:k + nev, k:k + nev] = alpha

                # Store the converged eigenvalues
                for i in range(nev):
                    print("Found an eigenvalue:", evs[i])
                    sys.stdout.flush()

                    aconv[k] = evs[i]
                    k += 1

                if k >= num:
                    break

                # Reset the iterative solver tolerance
                solver_tolerance = 1.0

                # Remove the eigenvalue from the search space
                V[:, 0:m - nev] = V[:, 0:m] @ U[:, nev:m]
                AV[:, 0:m - nev] = AV[:, 0:m] @ U[:, nev:m]

                if M is not None:
                    MV[:, 0:m - nev] = MV[:, 0:m] @ U[:, nev:m]

                VAV[0:m - nev, 0:m - nev] = S[nev:m, nev:m]

                S = VAV[0:m - nev, 0:m - nev]

                U = numpy.identity(m - nev, dtype)

                m -= nev
            else:
                found = False

        solver_tolerance = max(solver_tolerance / 2, tol)

        if m >= min(subspace_dimensions[1], n - k) and k < num:
            # Maximum search space dimension has been reached.
            new_m = min(subspace_dimensions[0], n - k)

            print("Shrinking the search space from %d to %d" % (m, new_m))
            sys.stdout.flush()

            V[:, 0:new_m] = V[:, 0:m] @ U[:, 0:new_m]
            AV[:, 0:new_m] = AV[:, 0:m] @ U[:, 0:new_m]

            if M is not None:
                MV[:, 0:new_m] = MV[:, 0:m] @ U[:, 0:new_m]

            VAV[0:new_m, 0:new_m] = S[0:new_m, 0:new_m]

            m = new_m
        elif m + nev - 1 >= min(subspace_dimensions[1], n - k):
            # Only one extra vector fits in the search space.
            nev = 1

        it += 1

    if return_eigenvectors:
        evs, v = scipy.linalg.eig(R[0:k, 0:k], left=False, right=True)

        if ctype == dtype:
            if return_subspace:
                return evs, Q[:, 0:k] @ v, Q[:, 0:k]
            return evs, Q[:, 0:k] @ v

        i = 0
        while i < k:
            Y[:, i] = Q[:, 0:k] @ v[:, i].real
            if evs[i].imag:
                Y[:, i + 1] = Q[:, 0:k] @ v[:, i].imag
                i += 1
            i += 1
        if return_subspace:
            return evs, Y[:, 0:k], Q[:, 0:k]
        return evs, Y[:, 0:k]

    if return_subspace:
        return aconv[0:num], Q[:, 0:k]

    return aconv[0:num]
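
A minimal usage sketch for the jdqr routine above, assuming it is called from the module in which it is defined (so that Target and the function itself are in scope) and that a SciPy sparse matrix is an acceptable operator; both assumptions should be checked against the actual package layout:

import scipy.sparse

# Hypothetical example problem: 1D Laplacian, symmetric tridiagonal.
n = 100
A = scipy.sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1],
                       shape=(n, n), format='csr')

# Ask for the 5 eigenvalues of smallest magnitude and their eigenvectors.
evs, vecs = jdqr(A, num=5, target=Target.SmallestMagnitude,
                 tol=1e-10, return_eigenvectors=True)
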
Example #4
def jdqz(A,
         B,
         num=5,
         target=Target.SmallestMagnitude,
         tol=1e-8,
         lock_tol=None,
         prec=None,
         maxit=1000,
         subspace_dimensions=(20, 40),
         initial_subspaces=None,
         arithmetic='real',
         testspace='Harmonic Petrov',
         return_eigenvectors=False,
         return_subspaces=False,
         interface=None):

    if arithmetic not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real' or 'complex'")

    if not prec:
        prec = _prec

    if not lock_tol:
        lock_tol = tol * 1e2

    solver_tolerance = 1.0

    n = A.shape[0]

    subspace_dimensions = (min(subspace_dimensions[0], n // 2),
                           min(subspace_dimensions[1], n))

    it = 1
    k = 0  # Number of eigenvalues found
    m = 0  # Size of the search subspace
    nev = 1  # Number of eigenvalues currently converging

    alpha = None
    beta = None
    evs = None
    sort_target = target

    dtype = A.dtype
    if interface:
        dtype = interface.dtype

    ctype = numpy.dtype(dtype.char.upper())
    if arithmetic in ['complex', 'c']:
        dtype = ctype

    if not interface:
        interface = NumPyInterface(n, dtype)

    extra = 0
    if dtype != ctype:
        # Allocate extra space in case a complex conjugate eigenpair occurs for a real matrix
        extra = 1

    # Generalized eigenvalues
    aconv = numpy.zeros(num + extra, ctype)
    bconv = numpy.zeros(num + extra, dtype)

    # Generalized Schur matrices
    RA = numpy.zeros((num + extra, num + extra), dtype)
    RB = numpy.zeros((num + extra, num + extra), dtype)

    # Generalized Schur vectors
    Q = interface.vector(num + extra)
    Z = interface.vector(num + extra)
    # Preconditioned Z
    Y = interface.vector(num + extra)
    QZ = numpy.zeros((num + extra, num + extra), dtype)

    # Orthonormal search subspace
    V = interface.vector(subspace_dimensions[1])
    # Orthonormal test subspace
    W = interface.vector(subspace_dimensions[1])
    # AV = A*V without orthogonalization
    AV = interface.vector(subspace_dimensions[1])
    # BV = B*V without orthogonalization
    BV = interface.vector(subspace_dimensions[1])

    # Residual vector
    r = interface.vector(1 + extra)

    # Low-dimensional projections: WAV = W'*A*V, WBV = W'*B*V
    WAV = numpy.zeros((subspace_dimensions[1], subspace_dimensions[1]), dtype)
    WBV = numpy.zeros((subspace_dimensions[1], subspace_dimensions[1]), dtype)

    while k < num and it <= maxit:
        if it == 1:
            if initial_subspaces is not None:
                nev = min(initial_subspaces[0].shape[1],
                          subspace_dimensions[1])
                V[:, 0:nev] = initial_subspaces[0][:, 0:nev]
                if len(initial_subspaces) > 1:
                    W[:, 0:nev] = initial_subspaces[1][:, 0:nev]
            else:
                V[:, 0] = interface.random()
                normalize(V[:, 0])
        else:
            solver_maxit = 100
            sigma_a = evs[0, 0]
            sigma_b = evs[1, 0]

            # Build an initial search subspace in an inexpensive way
            # and as close to the target as possible
            if m < subspace_dimensions[0]:
                solver_tolerance = 0.5
                solver_maxit = 1
                sigma_a = target
                sigma_b = 1.0

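            # Expand the search space with an approximate solution of the
            # generalized Jacobi-Davidson correction equation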
            V[:, m:m + nev] = solve_generalized_correction_equation(
                A, B, prec, Q[:, 0:k + nev], Z[:, 0:k + nev], Y[:, 0:k + nev],
                QZ[0:k + nev, 0:k + nev], sigma_a, sigma_b, r[:, 0:nev],
                solver_tolerance, solver_maxit, interface)

            orthonormalize(V[:, 0:m], V[:, m:m + nev], interface=interface)

        AV[:, m:m + nev] = A @ V[:, m:m + nev]
        BV[:, m:m + nev] = B @ V[:, m:m + nev]

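        # Expand the test space W according to the chosen Petrov strategy,
        # unless an initial test space was supplied for the first iteration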
        if it > 1 or initial_subspaces is None or len(initial_subspaces) < 2:
            nu, mu = _set_testspace(testspace, target, alpha, beta, dtype,
                                    ctype)

            if nu.shape[0] < nev:
                # Repeat nu and mu in case only an initial V was passed
                nu = numpy.diag(numpy.repeat(nu[0, 0], nev))
                mu = numpy.diag(numpy.repeat(mu[0, 0], nev))

            W[:, m:m + nev] = (AV[:, m:m + nev] @ nu[0:nev, 0:nev]
                               + BV[:, m:m + nev] @ mu[0:nev, 0:nev])

            orthogonalize(Z[:, 0:k], W[:, m:m + nev])
            orthonormalize(W[:, 0:m], W[:, m:m + nev], interface=interface)

        # Update WAV = W' * A * V
        for i in range(m):
            WAV[i, m:m + nev] = dot(W[:, i], AV[:, m:m + nev])
            WAV[m:m + nev, i] = dot(W[:, m:m + nev], AV[:, i])
        WAV[m:m + nev, m:m + nev] = dot(W[:, m:m + nev], AV[:, m:m + nev])

        # Update WBV = W' * B * V
        for i in range(m):
            WBV[i, m:m + nev] = dot(W[:, i], BV[:, m:m + nev])
            WBV[m:m + nev, i] = dot(W[:, m:m + nev], BV[:, i])
        WBV[m:m + nev, m:m + nev] = dot(W[:, m:m + nev], BV[:, m:m + nev])

        m += nev

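        # Generalized Schur decomposition of the projected pencil
        # (W' * A * V, W' * B * V)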
        [S, T, UL, UR] = generalized_schur(WAV[0:m, 0:m], WBV[0:m, 0:m])

        found = True
        while found:
            [S, T, UL, UR] = generalized_schur_sort(S, T, UL, UR, sort_target)

            nev = 1
            if dtype != ctype and S.shape[0] > 1 and abs(S[1, 0]) > 0.0:
                # Complex eigenvalue in real arithmetic
                nev = 2

            alpha = S[0:nev, 0:nev]
            beta = T[0:nev, 0:nev]

            evcond = sqrt(norm(alpha)**2 + norm(beta)**2)

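            # Approximate right (Q) and left (Z) generalized Schur vectors,
            # plus the preconditioned left vectors (Y)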
            Q[:, k:k + nev] = V[:, 0:m] @ UR[:, 0:nev]
            Z[:, k:k + nev] = W[:, 0:m] @ UL[:, 0:nev]
            Y[:, k:k + nev] = prec(Z[:, k:k + nev], alpha, beta)

            for i in range(k):
                QZ[i, k:k + nev] = dot(Q[:, i], Y[:, k:k + nev])
                QZ[k:k + nev, i] = dot(Q[:, k:k + nev], Y[:, i])
            QZ[k:k + nev, k:k + nev] = dot(Q[:, k:k + nev], Y[:, k:k + nev])

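            # Residual of the approximate generalized Schur pair:
            # r = A*q*beta - B*q*alpha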
            r[:, 0:nev] = (A @ Q[:, k:k + nev] @ beta
                           - B @ Q[:, k:k + nev] @ alpha)
            orthogonalize(Z[:, 0:k + nev], r[:, 0:nev])

            rnorm = norm(r[:, 0:nev]) / evcond

            evs = scipy.linalg.eigvals(alpha, beta, homogeneous_eigvals=True)
            ev_est = evs[0, 0] / evs[1, 0]
            print(
                "Step: %4d, subspace dimension: %3d, eigenvalue estimate: %13.6e + %13.6ei, residual norm: %e"
                % (it, m, ev_est.real, ev_est.imag, rnorm))
            sys.stdout.flush()

            if rnorm <= lock_tol:
                sort_target = ev_est

            # Store converged Petrov pairs
            if rnorm <= tol and m > nev:
                # Compute RA and RB so we can compute the eigenvectors
                if return_eigenvectors:
                    if k > 0:
                        AQ = AV[:, 0:m] @ UR[:, 0:nev]
                        BQ = BV[:, 0:m] @ UR[:, 0:nev]
                        for i in range(k):
                            RA[i, k:k + nev] = dot(Z[:, i], AQ)
                            RB[i, k:k + nev] = dot(Z[:, i], BQ)

                    RA[k:k + nev, k:k + nev] = alpha
                    RB[k:k + nev, k:k + nev] = beta

                # Store the converged eigenvalues
                for i in range(nev):
                    print("Found an eigenvalue:", evs[0, i] / evs[1, i])
                    sys.stdout.flush()

                    aconv[k] = evs[0, i]
                    bconv[k] = evs[1, i].real
                    k += 1

                if k >= num:
                    break

                # Reset the iterative solver tolerance
                solver_tolerance = 1.0

                # Unlock the target
                sort_target = target

                # Remove the eigenvalue from the search space
                V[:, 0:m - nev] = V[:, 0:m] @ UR[:, nev:m]
                AV[:, 0:m - nev] = AV[:, 0:m] @ UR[:, nev:m]
                BV[:, 0:m - nev] = BV[:, 0:m] @ UR[:, nev:m]
                W[:, 0:m - nev] = W[:, 0:m] @ UL[:, nev:m]

                WAV[0:m - nev, 0:m - nev] = S[nev:m, nev:m]
                WBV[0:m - nev, 0:m - nev] = T[nev:m, nev:m]

                S = WAV[0:m - nev, 0:m - nev]
                T = WBV[0:m - nev, 0:m - nev]

                UL = numpy.identity(m - nev, dtype)
                UR = numpy.identity(m - nev, dtype)

                m -= nev
            else:
                found = False

        solver_tolerance = max(solver_tolerance / 2, tol / 100)

        if m >= min(subspace_dimensions[1], n - k) and k < num:
            # Maximum search space dimension has been reached.
            new_m = min(subspace_dimensions[0], n - k)

            print("Shrinking the search space from %d to %d" % (m, new_m))
            sys.stdout.flush()

            V[:, 0:new_m] = V[:, 0:m] @ UR[:, 0:new_m]
            AV[:, 0:new_m] = AV[:, 0:m] @ UR[:, 0:new_m]
            BV[:, 0:new_m] = BV[:, 0:m] @ UR[:, 0:new_m]
            W[:, 0:new_m] = W[:, 0:m] @ UL[:, 0:new_m]

            WAV[0:new_m, 0:new_m] = S[0:new_m, 0:new_m]
            WBV[0:new_m, 0:new_m] = T[0:new_m, 0:new_m]

            m = new_m
        elif m + nev - 1 >= min(subspace_dimensions[1], n - k):
            # Only one extra vector fits in the search space.
            nev = 1

        it += 1

    if return_eigenvectors:
        evs, v = scipy.linalg.eig(RA[0:k, 0:k],
                                  RB[0:k, 0:k],
                                  left=False,
                                  right=True,
                                  homogeneous_eigvals=True)

        if ctype == dtype:
            if return_subspaces:
                return evs[0], evs[1], Q[:, 0:k] @ v, Q[:, 0:k], Z[:, 0:k]
            return evs[0], evs[1], Q[:, 0:k] @ v

        i = 0
        while i < k:
            Y[:, i] = Q[:, 0:k] @ v[:, i].real
            if evs[0][i].imag:
                Y[:, i + 1] = Q[:, 0:k] @ v[:, i].imag
                i += 1
            i += 1
        if return_subspaces:
            return evs[0], evs[1], Y[:, 0:k], Q[:, 0:k], Z[:, 0:k]
        return evs[0], evs[1], Y[:, 0:k]

    if return_subspaces:
        return aconv[0:num], bconv[0:num], Q[:, 0:k], Z[:, 0:k]
    return aconv[0:num], bconv[0:num]
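
A corresponding sketch for jdqz on a generalized problem A x = lambda B x, under the same assumptions as the jdqr example above; note that by default jdqz returns the converged generalized eigenvalues as separate alpha and beta arrays rather than their ratios:

import scipy.sparse

# Hypothetical generalized eigenvalue problem.
n = 100
A = scipy.sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1],
                       shape=(n, n), format='csr')
B = scipy.sparse.identity(n, format='csr')

alpha, beta = jdqz(A, B, num=5, tol=1e-10)
eigenvalues = alpha / beta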