Example #1
def compute_true_residuals(subspace, rvals, rvecs, epair_mask):
    """
    Compute the true residuals and residual norms (and not the ones estimated from
    the Lanczos subspace).
    """
    V = subspace.subspace
    AV = subspace.matrix_product

    def form_residual(rval, rvec):
        coefficients = np.hstack((rvec, -rval * rvec))
        return lincomb(coefficients, AV + V, evaluate=True)

    residuals = [
        form_residual(rvals[i], rvec)
        for i, rvec in enumerate(np.transpose(rvecs)) if i in epair_mask
    ]
    eigenvectors = [
        lincomb(rvec, V, evaluate=True)
        for i, rvec in enumerate(np.transpose(rvecs)) if i in epair_mask
    ]
    rnorms = np.array([np.sqrt(r @ r) for r in residuals])

    # Note here the actual residual norm (and not the residual norm squared)
    # is returned.
    return eigenvectors, residuals, rnorms
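
The helper above forms the residual r = A·x − λ·x of each selected Ritz pair as a single lincomb over the concatenated list AV + V. A minimal plain-numpy sketch of the same combination using dense stand-in arrays (A, V_mat, AV_mat are illustrative names, not part of the library):

import numpy as np

# Dense stand-ins: the columns of V_mat span the subspace, AV_mat holds A applied to them
A = np.diag([1.0, 2.0, 3.0, 4.0])
V_mat = np.linalg.qr(np.random.rand(4, 2))[0]   # two orthonormal subspace vectors
AV_mat = A @ V_mat

# Ritz pairs of the projected matrix T = V^T A V
T = V_mat.T @ AV_mat
rvals, rvecs = np.linalg.eigh(T)
rval, rvec = rvals[0], rvecs[:, 0]

# Same combination as form_residual: AV @ rvec + V @ (-rval * rvec)
residual = AV_mat @ rvec - rval * (V_mat @ rvec)
rnorm = np.sqrt(residual @ residual)            # actual residual norm, not squared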
Example #2
def amend_true_residuals(state, subspace, rvals, rvecs, epair_mask):
    """
    Compute the true residuals and residual norms (and not the ones estimated from
    the Lanczos subspace) and amend the `state` accordingly.
    """
    V = subspace.subspace
    AV = subspace.matrix_product

    def form_residual(rval, rvec):
        coefficients = np.hstack((rvec, -rval * rvec))
        return lincomb(coefficients, AV + V, evaluate=True)

    state.residuals = [form_residual(rvals[i], rvec)
                       for i, rvec in enumerate(np.transpose(rvecs))
                       if i in epair_mask]
    state.eigenvectors = [lincomb(rvec, V, evaluate=True)
                          for i, rvec in enumerate(np.transpose(rvecs))
                          if i in epair_mask]

    rnorms = np.array([np.sqrt(r @ r) for r in state.residuals])
    state.residual_norms = rnorms

    # TODO For consistency with the Davidson the residual norms are
    #      squared again to give output in the same order of magnitude.
    state.residual_norms = state.residual_norms**2
    return state
Example #3
    def orthogonalise_against(self, vector, subspace):
        """
        Orthogonalise the passed vector against a subspace. The latter is assumed
        to only consist of orthonormal vectors. Effectively computes
        ``(1 - SS * SS^T) * vector``.

        vector
            Vector to make orthogonal to the subspace
        subspace : list
            Subspace of orthonormal vectors.
        """
        # Project out the components of the current subspace
        # That is form (1 - SS * SS^T) * vector = vector + SS * (-SS^T * vector)
        for _ in range(self.n_rounds):
            coefficients = np.hstack(([1], -(vector @ subspace)))
            vector = lincomb(coefficients, [vector] + subspace, evaluate=True)
            if self.explicit_symmetrisation is not None:
                self.explicit_symmetrisation.symmetrise(vector)
        return vector
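
A plain-numpy sketch of the projection this method performs, (1 - SS·SS^T)·vector, repeated for several rounds exactly as in the loop above (the dense arrays and n_rounds = 2 are illustrative assumptions):

import numpy as np

n_rounds = 2
SS = np.linalg.qr(np.random.rand(6, 3))[0]   # orthonormal subspace, one vector per column
vector = np.random.rand(6)

for _ in range(n_rounds):
    # vector + SS @ (-SS^T @ vector), i.e. remove all components inside the subspace
    vector = vector - SS @ (SS.T @ vector)

# The remaining overlap with the subspace is at machine-precision level
assert np.max(np.abs(SS.T @ vector)) < 1e-12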
Example #4
    def matrix_product(self):
        """
        Return the reconstructed matrix-vector product for all subspace vectors
        (using the Lanczos relation).
        """
        r, T = self.residual, self.subspace_matrix
        # b = self.rayleigh_extension; V = self.subspace
        # Form AV = V * T + r * b'
        AV = []
        for i in range(len(self.subspace)):
            # Compute AV[:, i]
            coefficients = []
            vectors = []
            for (j, v) in enumerate(self.subspace):
                if T[j, i] != 0:
                    coefficients.append(T[j, i])
                    vectors.append(v)
            if i >= len(self.subspace) - self.n_block:
                ires = i - (len(self.subspace) - self.n_block)
                coefficients.append(1)
                vectors.append(r[ires])
            AV.append(lincomb(np.array(coefficients), vectors, evaluate=True))
        return AV
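
The reconstruction relies on the Lanczos relation A·V = V·T + r·b^T, where b selects the last n_block subspace positions. A self-contained plain-numpy check of that relation with a tiny single-vector Lanczos run (n, n_ss and all array names are illustrative):

import numpy as np

np.random.seed(0)
n, n_ss = 8, 4
A = np.random.rand(n, n)
A = A + A.T                              # symmetric test matrix

V = np.zeros((n, n_ss))
T = np.zeros((n_ss, n_ss))
v = np.random.rand(n)
v /= np.linalg.norm(v)
v_prev, beta = np.zeros(n), 0.0
for j in range(n_ss):
    V[:, j] = v
    r = A @ v - beta * v_prev            # remove coupling to the previous vector
    alpha = v @ r
    r = r - alpha * v                    # remove coupling to the current vector
    T[j, j] = alpha
    if j + 1 < n_ss:
        beta = np.linalg.norm(r)
        T[j, j + 1] = T[j + 1, j] = beta
        v_prev, v = v, r / beta

# Lanczos relation: A V = V T + r b^T, with b picking the last subspace position
b = np.zeros(n_ss)
b[-1] = 1.0
assert np.allclose(A @ V, V @ T + np.outer(r, b))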
Example #5
    def form_residual(rval, rvec):
        coefficients = np.hstack((rvec, -rval * rvec))
        return lincomb(coefficients, AV + V, evaluate=True)
Example #6
def lanczos_iterations(iterator,
                       n_ep,
                       min_subspace,
                       max_subspace,
                       conv_tol=1e-9,
                       which="LA",
                       max_iter=100,
                       callback=None,
                       debug_checks=False,
                       state=None):
    """Drive the Lanczos iterations

    Parameters
    ----------
    iterator : LanczosIterator
        Iterator generating the Lanczos subspace (contains matrix, guess,
        residual, Ritz pairs from restart, symmetrisation and orthogonalisation)
    n_ep : int
        Number of eigenpairs to be computed
    min_subspace : int
        Subspace size to collapse to when performing a thick restart.
    max_subspace : int
        Maximal subspace size
    conv_tol : float, optional
        Convergence tolerance on the l2 norm squared of residuals to consider
        them converged
    which : str, optional
        Which eigenvectors to converge to (e.g. LM, LA, SM, SA)
    max_iter : int, optional
        Maximal number of iterations
    callback : callable, optional
        Callback to run after each iteration
    debug_checks : bool, optional
        Enable some potentially costly debug checks
        (Loss of orthogonality etc.)
    """
    if callback is None:

        def callback(state, identifier):
            pass

    # TODO For consistency with the Davidson the conv_tol is interpreted
    #      as the residual norm *squared*. Arnoldi, however, uses the actual norm
    #      to check for convergence and so on. See also the comment in Davidson
    #      around the line computing state.residual_norms
    #
    #      See also the squaring of the residual norms below
    tol = np.sqrt(conv_tol)

    if state is None:
        state = LanczosState(iterator)
        callback(state, "start")
        state.timer.restart("iteration")
        n_applies_offset = 0
    else:
        n_applies_offset = state.n_applies

    for subspace in iterator:
        b = subspace.rayleigh_extension
        with state.timer.record("rayleigh_ritz"):
            rvals, rvecs = np.linalg.eigh(subspace.subspace_matrix)

        if debug_checks:
            eps = np.finfo(float).eps
            orthotol = max(tol / 1000, subspace.n_problem * eps)
            orth = subspace.check_orthogonality(orthotol)
            state.subspace_orthogonality = orth

        is_rval_converged, eigenpair_error = check_convergence(
            subspace, rvals, rvecs, tol)

        # Update state
        state.n_iter += 1
        state.n_applies = subspace.n_applies + n_applies_offset
        state.converged = False
        state.eigenvectors = None  # Not computed in Lanczos
        state.subspace_vectors = subspace.subspace
        state.subspace_residual = subspace.residual

        epair_mask = select_eigenpairs(rvals, n_ep, which)
        state.eigenvalues = rvals[epair_mask]
        state.residual_norms = eigenpair_error[epair_mask]
        converged = np.all(is_rval_converged[epair_mask])

        # TODO For consistency with the Davidson the residual norms are squared
        #      again to give output in the same order of magnitude.
        state.residual_norms = state.residual_norms**2

        callback(state, "next_iter")
        state.timer.restart("iteration")

        if converged:
            state = amend_true_residuals(state, subspace, rvals, rvecs,
                                         epair_mask)
            state.converged = True
            callback(state, "is_converged")
            state.timer.stop("iteration")
            return state

        if state.n_iter >= max_iter:
            warnings.warn(
                la.LinAlgWarning(
                    f"Maximum number of iterations (== {max_iter}) "
                    "reached in lanczos procedure."))
            state = amend_true_residuals(state, subspace, rvals, rvecs,
                                         epair_mask)
            state.timer.stop("iteration")
            state.converged = False
            return state

        if len(rvecs) + subspace.n_block > max_subspace:
            callback(state, "restart")

            epair_mask = select_eigenpairs(rvals, min_subspace, which)
            V = subspace.subspace
            vn, betan = subspace.ortho.qr(subspace.residual)

            Y = [
                lincomb(rvec, V, evaluate=True)
                for i, rvec in enumerate(np.transpose(rvecs))
                if i in epair_mask
            ]
            Theta = rvals[epair_mask]
            Sigma = rvecs[:, epair_mask].T @ b @ betan.T

            iterator = LanczosIterator(
                iterator.matrix,
                vn,
                ritz_vectors=Y,
                ritz_values=Theta,
                ritz_overlaps=Sigma,
                explicit_symmetrisation=iterator.explicit_symmetrisation)
            state.n_restart += 1
            return lanczos_iterations(iterator, n_ep, min_subspace,
                                      max_subspace, conv_tol, which, max_iter,
                                      callback, debug_checks, state)

    state = amend_true_residuals(state, subspace, rvals, rvecs, epair_mask)
    state.timer.stop("iteration")
    state.converged = False
    warnings.warn(
        la.LinAlgWarning(
            "Lanczos procedure found maximal subspace possible. Iteration cannot be "
            "continued like this and will be aborted without convergence. "
            "Try a different guess."))
    return state
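
On a thick restart the subspace is compressed to the selected Ritz vectors Y with values Theta, and Sigma records their coupling to the freshly orthonormalised residual block vn. A dense-numpy illustration of that bookkeeping (all arrays are random stand-ins; only the shapes matter):

import numpy as np

n, n_ss, n_block, min_subspace = 10, 6, 1, 2
V = np.linalg.qr(np.random.rand(n, n_ss))[0]      # current Lanczos subspace
residual = np.random.rand(n, n_block)             # residual block of the last iteration
rvals = np.sort(np.random.rand(n_ss))             # Ritz values of the subspace matrix
rvecs = np.linalg.qr(np.random.rand(n_ss, n_ss))[0]

# b selects the last n_block subspace positions (the rayleigh_extension)
b = np.zeros((n_ss, n_block))
b[-n_block:, :] = np.eye(n_block)

# QR of the residual block gives the new starting vectors vn and betan
vn, betan = np.linalg.qr(residual)

epair_mask = np.arange(min_subspace)              # e.g. the smallest Ritz values for which="SA"
Y = V @ rvecs[:, epair_mask]                      # retained Ritz vectors
Theta = rvals[epair_mask]                         # retained Ritz values
Sigma = rvecs[:, epair_mask].T @ b @ betan.T      # overlap passed on as ritz_overlaps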
Example #7
    def __next__(self):
        """Advance the iterator, i.e. extend the Lanczos subspace"""
        if self.n_iter == 0:
            # Initialise Lanczos subspace
            v = self.ortho.orthogonalise(self.residual)
            self.lanczos_subspace = v
            r = evaluate(self.matrix @ v)
            alpha = np.empty((self.n_block, self.n_block))
            for p in range(self.n_block):
                alpha[p, :] = v[p] @ r

            # r = r - v * alpha - Y * Sigma
            Sigma, Y = self.ritz_overlaps, self.ritz_vectors
            r = [
                lincomb(np.hstack(([1], -alpha[:, p], -Sigma[:, p])),
                        [r[p]] + v + Y,
                        evaluate=True) for p in range(self.n_block)
            ]

            # r = r - Y * Y'r (Full reorthogonalisation)
            for p in range(self.n_block):
                r[p] = self.ortho.orthogonalise_against(
                    r[p], self.ritz_vectors)

            self.residual = r
            self.n_iter = 1
            self.n_applies = self.n_block
            self.alphas = [alpha]  # Diagonal matrix block of subspace matrix
            self.betas = []  # Side-diagonal matrix blocks
            return LanczosSubspace(self)

        # Iteration 1 and onwards:
        q = self.lanczos_subspace[-self.n_block:]
        v, beta = self.ortho.qr(self.residual)
        if np.linalg.norm(beta) < np.finfo(float).eps * self.n_problem:
            # No point to go on ... new vectors will be decoupled from old ones
            raise StopIteration()

        # r = A * v - q * beta^T
        self.n_applies += self.n_block
        r = self.matrix @ v
        r = [
            lincomb(np.hstack(([1], -(beta.T)[:, p])), [r[p]] + q,
                    evaluate=True) for p in range(self.n_block)
        ]

        # alpha = v^T * r
        alpha = np.empty((self.n_block, self.n_block))
        for p in range(self.n_block):
            alpha[p, :] = v[p] @ r

        # r = r - v * alpha
        r = [
            lincomb(np.hstack(([1], -alpha[:, p])), [r[p]] + v, evaluate=True)
            for p in range(self.n_block)
        ]

        # Full reorthogonalisation
        for p in range(self.n_block):
            r[p] = self.ortho.orthogonalise_against(
                r[p], self.lanczos_subspace + self.ritz_vectors)

        # Commit results
        self.n_iter += 1
        self.lanczos_subspace.extend(v)
        self.residual = r
        self.alphas.append(alpha)
        self.betas.append(beta)
        return LanczosSubspace(self)
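
Each call thus performs one block recurrence: QR-factorise the residual into new vectors v and the coupling beta, form r = A·v − q·beta^T, project out v via alpha = v^T·r and fully reorthogonalise. A dense single-step sketch of the same arithmetic (random stand-in arrays, n_block = 2):

import numpy as np

np.random.seed(1)
n, m, n_block = 12, 4, 2
A = np.random.rand(n, n)
A = A + A.T                                       # symmetric test matrix
V = np.linalg.qr(np.random.rand(n, m))[0]         # existing Lanczos subspace
R = np.random.rand(n, n_block)
R = R - V @ (V.T @ R)                             # residual block, orthogonal to V

q = V[:, -n_block:]                               # last block of subspace vectors
v, beta = np.linalg.qr(R)                         # new orthonormal block and beta

r = A @ v - q @ beta.T                            # r = A v - q beta^T
alpha = v.T @ r                                   # alpha = v^T r
r = r - v @ alpha                                 # r = r - v alpha
r = r - V @ (V.T @ r) - v @ (v.T @ r)             # full reorthogonalisation

V = np.hstack([V, v])                             # extended subspace; r feeds the next step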
Example #8
def davidson_iterations(matrix,
                        state,
                        max_subspace,
                        max_iter,
                        n_ep,
                        is_converged,
                        which,
                        callback=None,
                        preconditioner=None,
                        preconditioning_method="Davidson",
                        debug_checks=False,
                        residual_min_norm=None,
                        explicit_symmetrisation=None):
    """Drive the davidson iterations

    Parameters
    ----------
    matrix
        Matrix to diagonalise
    state
        DavidsonState containing the eigenvector guess
    max_subspace : int or NoneType, optional
        Maximal subspace size
    max_iter : int, optional
        Maximal number of iterations
    n_ep : int or NoneType, optional
        Number of eigenpairs to be computed
    is_converged
        Function to test for convergence
    callback : callable, optional
        Callback to run after each iteration
    which : str, optional
        Which eigenvectors to converge to. Needs to be chosen such that
        it agrees with the selected preconditioner.
    preconditioner
        Preconditioner (type or instance)
    preconditioning_method : str, optional
        Preconditioning method. Valid values are "Davidson"
        or "Sleijpen-van-der-Vorst"
    debug_checks : bool, optional
        Enable some potentially costly debug checks
        (Loss of orthogonality etc.)
    residual_min_norm : float or NoneType, optional
        Minimal norm a residual needs to have in order to be accepted as
        a new subspace vector
        (defaults to 2 * len(matrix) * machine_epsilon)
    explicit_symmetrisation
        Explicit symmetrisation to apply to new subspace vectors before
        adding them to the subspace. Allows one to correct for loss of index
        or spin symmetries (type or instance)
    """
    if preconditioning_method not in ["Davidson", "Sleijpen-van-der-Vorst"]:
        raise ValueError("Only 'Davidson' and 'Sleijpen-van-der-Vorst' "
                         "are valid preconditioner methods")
    if preconditioning_method == "Sleijpen-van-der-Vorst":
        raise NotImplementedError("Sleijpen-van-der-Vorst preconditioning "
                                  "not yet implemented.")

    if callback is None:

        def callback(state, identifier):
            pass

    # The problem size
    n_problem = matrix.shape[1]

    # The block size
    n_block = len(state.subspace_vectors)

    # The current subspace size
    n_ss_vec = n_block

    # The current subspace
    SS = state.subspace_vectors

    # The matrix A projected into the subspace
    # as a continuous array. Only the view
    # Ass[:n_ss_vec, :n_ss_vec] contains valid data.
    Ass_cont = np.empty((max_subspace, max_subspace))

    eps = np.finfo(float).eps
    if residual_min_norm is None:
        residual_min_norm = 2 * n_problem * eps

    callback(state, "start")
    state.timer.restart("iteration")

    with state.timer.record("projection"):
        # Initial application of A to the subspace
        Ax = evaluate(matrix @ SS)
        state.n_applies += n_ss_vec

    while state.n_iter < max_iter:
        state.n_iter += 1

        assert len(SS) >= n_block
        assert len(SS) <= max_subspace

        # Project A onto the subspace, keeping in mind
        # that the values Ass[:-n_block, :-n_block] are already valid,
        # since they have been computed in the previous iterations already.
        with state.timer.record("projection"):
            Ass = Ass_cont[:n_ss_vec, :n_ss_vec]  # Increase the work view size
            for i in range(n_block):
                Ass[:, -n_block + i] = Ax[-n_block + i] @ SS
            Ass[-n_block:, :] = np.transpose(Ass[:, -n_block:])

        # Compute the which(== largest, smallest, ...) eigenpair of Ass
        # and the associated ritz vector as well as residual
        with state.timer.record("rayleigh_ritz"):
            if Ass.shape == (n_block, n_block):
                rvals, rvecs = la.eigh(Ass)  # Do a full diagonalisation
            else:
                # TODO Maybe play with precision a little here
                # TODO Maybe use previous vectors somehow
                v0 = None
                rvals, rvecs = sla.eigsh(Ass, k=n_block, which=which, v0=v0)

        with state.timer.record("residuals"):
            # Form residuals, A * SS * v - λ * SS * v = Ax * v + SS * (-λ*v)
            def form_residual(rval, rvec):
                coefficients = np.hstack((rvec, -rval * rvec))
                return lincomb(coefficients, Ax + SS, evaluate=True)

            residuals = [
                form_residual(rvals[i], v)
                for i, v in enumerate(np.transpose(rvecs))
            ]
            assert len(residuals) == n_block

            # Update the state's eigenpairs and residuals
            epair_mask = select_eigenpairs(rvals, n_ep, which)
            state.eigenvalues = rvals[epair_mask]
            state.residuals = [residuals[i] for i in epair_mask]
            state.residual_norms = np.array([r @ r for r in state.residuals])
            # TODO This is misleading ... actually residual_norms contains
            #      the norms squared. That's also the used e.g. in adcman to
            #      check for convergence, so using the norm squared is fine,
            #      in theory ... it should just be consistent. I think it is
            #      better to go for the actual norm (no squared) inside the code
            #
            #      If this adapted, also change the conv_tol to tol conversion
            #      inside the Lanczos procedure.

        callback(state, "next_iter")
        state.timer.restart("iteration")
        if is_converged(state):
            # Build the eigenvectors we desire from the subspace vectors:
            state.eigenvectors = [
                lincomb(v, SS, evaluate=True)
                for i, v in enumerate(np.transpose(rvecs)) if i in epair_mask
            ]

            state.converged = True
            callback(state, "is_converged")
            state.timer.stop("iteration")
            return state

        if state.n_iter == max_iter:
            warnings.warn(
                la.LinAlgWarning(
                    f"Maximum number of iterations (== {max_iter}) "
                    "reached in davidson procedure."))
            state.eigenvectors = [
                lincomb(v, SS, evaluate=True)
                for i, v in enumerate(np.transpose(rvecs)) if i in epair_mask
            ]
            state.timer.stop("iteration")
            state.converged = False
            return state

        if n_ss_vec + n_block > max_subspace:
            callback(state, "restart")
            with state.timer.record("projection"):
                # The addition of the preconditioned vectors would exceed the
                # maximal subspace size => collapse first, i.e. keep the current
                # Ritz vectors as the new subspace
                SS = [
                    lincomb(v, SS, evaluate=True) for v in np.transpose(rvecs)
                ]
                state.subspace_vectors = SS
                Ax = [
                    lincomb(v, Ax, evaluate=True) for v in np.transpose(rvecs)
                ]
                n_ss_vec = len(SS)

                # Update projection of ADC matrix A onto subspace
                Ass = Ass_cont[:n_ss_vec, :n_ss_vec]
                for i in range(n_ss_vec):
                    Ass[:, i] = Ax[i] @ SS
            # continue to add residuals to space

        with state.timer.record("preconditioner"):
            if preconditioner:
                if hasattr(preconditioner, "update_shifts"):
                    # Epsilon factor to make sure that 1 / (shift - diagonal)
                    # does not become ill-conditioned as soon as the shift
                    # approaches the actual diagonal values (which are the
                    # eigenvalues for the ADC(2) doubles part if the coupling
                    # block is absent)
                    rvals_eps = 1e-6
                    preconditioner.update_shifts(rvals - rvals_eps)

                preconds = evaluate(preconditioner @ residuals)
            else:
                preconds = residuals

            # Explicitly symmetrise the new vectors if requested
            if explicit_symmetrisation:
                explicit_symmetrisation.symmetrise(preconds)

        # Project the components of the preconditioned vectors away
        # which are already contained in the subspace.
        # Then add those, which have a significant norm to the subspace.
        with state.timer.record("orthogonalisation"):
            n_ss_added = 0
            for i in range(n_block):
                pvec = preconds[i]
                # Project out the components of the current subspace
                # That is form (1 - SS * SS^T) * pvec = pvec + SS * (-SS^T * pvec)
                coefficients = np.hstack(([1], -(pvec @ SS)))
                pvec = lincomb(coefficients, [pvec] + SS, evaluate=True)
                pnorm = np.sqrt(pvec @ pvec)
                if pnorm > residual_min_norm:
                    # Extend the subspace
                    SS.append(evaluate(pvec / pnorm))
                    n_ss_added += 1
                    n_ss_vec = len(SS)

            if debug_checks:
                orth = np.array([[SS[i] @ SS[j] for i in range(n_ss_vec)]
                                 for j in range(n_ss_vec)])
                orth -= np.eye(n_ss_vec)
                state.subspace_orthogonality = np.max(np.abs(orth))
                if state.subspace_orthogonality > n_problem * eps:
                    warnings.warn(
                        la.LinAlgWarning(
                            "Subspace in davidson has lost orthogonality. "
                            "Expect inaccurate results."))

        if n_ss_added == 0:
            state.timer.stop("iteration")
            state.converged = False
            state.eigenvectors = [
                lincomb(v, SS, evaluate=True)
                for i, v in enumerate(np.transpose(rvecs)) if i in epair_mask
            ]
            warnings.warn(
                la.LinAlgWarning(
                    "Davidson procedure could not generate any further vectors for "
                    "the subspace. Iteration cannot be continued like this and will "
                    "be aborted without convergence. Try a different guess."))
            return state

        with state.timer.record("projection"):
            Ax.extend(matrix @ SS[-n_ss_added:])
            state.n_applies += n_ss_added
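
The preconditioner object encapsulates what is classically a Jacobi-style diagonal update, dividing each residual by (shift − diagonal) before it is orthogonalised into the subspace. A hedged plain-numpy sketch of that step, mirroring the rvals_eps regularisation above (the dense arrays are illustrative stand-ins, not the library's preconditioner class):

import numpy as np

diagonal = np.linspace(1.0, 5.0, 20)                # diagonal of the matrix to diagonalise
rvals = np.array([1.1, 1.9])                        # current Ritz values
residuals = [np.random.rand(20), np.random.rand(20)]

rvals_eps = 1e-6                                    # keeps 1 / (shift - diagonal) well-conditioned
shifts = rvals - rvals_eps

# Diagonal (Davidson) preconditioning of each residual
preconds = [res / (shift - diagonal) for res, shift in zip(residuals, shifts)]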