Example #1
def kernel_eig(hamiltonian, equations, amplitudes, tolerance=1e-9):
    """
    Coupled-cluster solver (eigenvalue problem).
    Args:
        hamiltonian: a dict of hamiltonian matrix elements, or a pyscf ERIS object;
        equations (callable): coupled-cluster equations;
        amplitudes (iterable): starting amplitudes (a list of OrderedDicts);
        tolerance (float): convergence criterion;

    Returns:
        Eigenvalues and the corresponding coupled-cluster amplitudes (eigenvectors).
    """
    # Convert ERIS to hamiltonian dict if needed
    if not isinstance(hamiltonian, dict):
        hamiltonian = eris_hamiltonian(hamiltonian)

    # Preconditioning
    e_occ = numpy.diag(hamiltonian["oo"])
    e_vir = numpy.diag(hamiltonian["vv"])

    # Antisymmetry data
    sample = amplitudes[0].values()
    labels = list(i.metadata["labels"] for i in sample)
    ixs = list(ltri_ix_amplitudes(i) for i in sample)
    shapes = list(i.shape for i in sample)

    def matvec(vec):
        result = []
        for i in vec:
            a = v2a_sym(i, labels, shapes, ixs)
            a = OrderedDict(zip(amplitudes[0].keys(), a))
            r = oneshot(equations, hamiltonian, a)
            result.append(a2v_sym(r, ixs))
        return result

    def precond(res, e0, x0):
        a = v2a_sym(res, labels, shapes, ixs)
        a = list(
            MetaArray(i, **j.metadata)
            for i, j in zip(a, amplitudes[0].values()))
        a = res2amps(a, e_occ, e_vir, constant=e0)
        return a2v_sym(a, ixs)

    amplitudes_plain = tuple(a2v_sym(i.values(), ixs) for i in amplitudes)

    conv, values, vectors = davidson(matvec,
                                     amplitudes_plain,
                                     precond,
                                     tol=tolerance,
                                     nroots=len(amplitudes))

    if any(not i for i in conv):
        warn("Following eigenvalues did not converge: {}".format(
            list(i for i, x in enumerate(conv) if not x)))

    return values, list(v2a_sym(i, labels, shapes, ixs) for i in vectors)
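The matvec/precond/nroots contract used above is the block-Davidson convention of pyscf's linalg_helper.davidson1: the operator maps a list of vectors to a list of vectors, and the solver returns convergence flags, eigenvalues and eigenvectors. A minimal self-contained sketch of that contract on a dense, diagonally dominant test matrix (the test matrix and names below are illustrative, and it is assumed that the davidson used above follows this convention):

import numpy
from pyscf.lib import linalg_helper

n = 100
rng = numpy.random.RandomState(0)
a = numpy.diag(numpy.arange(n, dtype=float)) + rng.uniform(-1e-2, 1e-2, (n, n))
a = 0.5 * (a + a.T)                      # symmetric, diagonally dominant test matrix
diag = a.diagonal()

def matvec(vecs):                        # block operator: list of vectors in, list out
    return [a.dot(v) for v in vecs]

def precond(residual, e0, x0):           # standard diagonal (Davidson) preconditioner
    return residual / (diag - e0 + 1e-12)

guess = [numpy.eye(n)[i] for i in range(2)]
conv, values, vectors = linalg_helper.davidson1(matvec, guess, precond, nroots=2)
print(conv, values, numpy.linalg.eigvalsh(a)[:2])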
Example #2
def kernel_eig(hamiltonian, equations, amplitudes, tolerance=1e-9):
    """
    Coupled-cluster solver (eigenvalue problem).
    Args:
        hamiltonian: a dict of hamiltonian matrix elements, or a pyscf ERIS object;
        equations (callable): coupled-cluster equations;
        amplitudes (iterable): starting amplitudes (a list of OrderedDicts);
        tolerance (float): convergence criterion;

    Returns:
        Eigenvalues and the corresponding coupled-cluster amplitudes (eigenvectors).
    """
    # Convert ERIS to hamiltonian dict if needed
    if not isinstance(hamiltonian, dict):
        hamiltonian = eris_hamiltonian(hamiltonian)

    # Preconditioning
    e_occ = numpy.diag(hamiltonian["oo"])
    e_vir = numpy.diag(hamiltonian["vv"])

    # Antisymmetry data
    sample = amplitudes[0].values()
    labels = list(i.metadata["labels"] for i in sample)
    ixs = list(ltri_ix_amplitudes(i) for i in sample)
    shapes = list(i.shape for i in sample)

    def matvec(vec):
        result = []
        for i in vec:
            a = v2a_sym(i, labels, shapes, ixs)
            a = OrderedDict(zip(amplitudes[0].keys(), a))
            r = oneshot(equations, hamiltonian, a)
            result.append(a2v_sym(r, ixs))
        return result

    def precond(res, e0, x0):
        a = v2a_sym(res, labels, shapes, ixs)
        a = list(MetaArray(i, **j.metadata) for i, j in zip(a, amplitudes[0].values()))
        a = res2amps(a, e_occ, e_vir, constant=e0)
        return a2v_sym(a, ixs)

    amplitudes_plain = tuple(a2v_sym(i.values(), ixs) for i in amplitudes)

    conv, values, vectors = davidson(matvec, amplitudes_plain, precond, tol=tolerance, nroots=len(amplitudes))

    if any(not i for i in conv):
        warn("Following eigenvalues did not converge: {}".format(list(
            i for i, x in enumerate(conv) if not x
        )))

    return values, list(v2a_sym(i, labels, shapes, ixs) for i in vectors)
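For reference, the preconditioner in both versions divides the residual by orbital-energy denominators built from the diagonals e_occ and e_vir, shifted by the current eigenvalue estimate e0; this is presumably what res2amps implements, with the exact sign convention living in that helper. Schematically, for a doubles-type residual:

$$ t_{ij}^{ab} \;\leftarrow\; \frac{r_{ij}^{ab}}{\varepsilon_i + \varepsilon_j - \varepsilon_a - \varepsilon_b + e_0} $$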
Example #3
def solve(myMF, dm_guess=None, safe_guess=True):

    assert hasattr(myMF, "mol")
    assert hasattr(myMF, "mo_occ")
    assert hasattr(myMF, "mo_coeff")
    assert myMF.mol.nelectron % 2 == 0  # RHF possible

    S_ao = myMF.get_ovlp(myMF.mol)
    OEI_ao = myMF.get_hcore(myMF.mol)
    numPairs = myMF.mol.nelectron // 2
    numVirt = OEI_ao.shape[0] - numPairs
    numVars = numPairs * numVirt

    if dm_guess is None:
        if (len(myMF.mo_occ) == 0) or (safe_guess == True):
            dm_ao = myMF.get_init_guess(key=myMF.init_guess, mol=myMF.mol)
        else:
            dm_ao = np.dot(np.dot(myMF.mo_coeff, np.diag(myMF.mo_occ)), myMF.mo_coeff.T)
    else:
        dm_ao = np.array(dm_guess, copy=True)

    myJK_ao = __JKengine(myMF)
    FOCK_ao = OEI_ao + myJK_ao.getJK_ao(dm_ao)
    energies, orbitals = scipy.linalg.eigh(a=FOCK_ao, b=S_ao)
    dm_ao = 2 * np.dot(orbitals[:, :numPairs], orbitals[:, :numPairs].T)
    FOCK_ao = OEI_ao + myJK_ao.getJK_ao(dm_ao)
    FOCK_mo = np.dot(orbitals.T, np.dot(FOCK_ao, orbitals))
    grdnorm = 4 * np.linalg.norm(FOCK_mo[:numPairs, numPairs:])
    energy = myMF.mol.energy_nuc() + 0.5 * np.einsum("ij,ij->", OEI_ao + FOCK_ao, dm_ao)

    logger.note(myMF, "RHF:NewtonRaphson :: Starting augmented Hessian Newton-Raphson RHF.")

    iteration = 0

    while grdnorm > 1e-7:

        iteration += 1
        tempJK_mo = __JKengine(myMF, orbitals)
        ini_guess = np.ones([numVars + 1], dtype=float)
        for occ in range(numPairs):
            for virt in range(numVirt):
                ini_guess[occ + numPairs * virt] = -FOCK_mo[occ, numPairs + virt] / max(
                    FOCK_mo[numPairs + virt, numPairs + virt] - FOCK_mo[occ, occ], 1e-6
                )

        def myprecon(resid, eigval, eigvec):

            myprecon_cutoff = 1e-10
            local_myprecon = np.zeros([numVars + 1], dtype=float)
            for occ in range(numPairs):
                for virt in range(numVirt):
                    denominator = FOCK_mo[numPairs + virt, numPairs + virt] - FOCK_mo[occ, occ] - eigval
                    if abs(denominator) < myprecon_cutoff:
                        local_myprecon[occ + numPairs * virt] = eigvec[occ + numPairs * virt] / myprecon_cutoff
                    else:
                        # local_myprecon = eigvec / ( diag(H) - eigval ) = K^{-1} u
                        local_myprecon[occ + numPairs * virt] = eigvec[occ + numPairs * virt] / denominator
            if abs(eigval) < myprecon_cutoff:
                local_myprecon[numVars] = eigvec[numVars] / myprecon_cutoff
            else:
                local_myprecon[numVars] = -eigvec[numVars] / eigval
            # alpha_myprecon = - ( r, K^{-1} u ) / ( u, K^{-1} u )
            alpha_myprecon = -np.einsum("i,i->", local_myprecon, resid) / np.einsum("i,i->", local_myprecon, eigvec)
            # local_myprecon = r - ( r, K^{-1} u ) / ( u, K^{-1} u ) * u
            local_myprecon = resid + alpha_myprecon * eigvec
            for occ in range(numPairs):
                for virt in range(numVirt):
                    denominator = FOCK_mo[numPairs + virt, numPairs + virt] - FOCK_mo[occ, occ] - eigval
                    if abs(denominator) < myprecon_cutoff:
                        local_myprecon[occ + numPairs * virt] = -local_myprecon[occ + numPairs * virt] / myprecon_cutoff
                    else:
                        local_myprecon[occ + numPairs * virt] = -local_myprecon[occ + numPairs * virt] / denominator
            # Augmented (last) element: its diagonal entry in the augmented Hessian is zero
            if abs(eigval) < myprecon_cutoff:
                local_myprecon[numVars] = -local_myprecon[numVars] / myprecon_cutoff
            else:
                local_myprecon[numVars] = local_myprecon[numVars] / eigval
            return local_myprecon

        eigenval, eigenvec = linalg_helper.davidson(
            aop=__wrapAugmentedHessian(FOCK_mo, numPairs, numVirt, tempJK_mo),
            x0=ini_guess,
            precond=myprecon,
            # tol=1e-14,
            # max_cycle=50,
            max_space=20,
            # lindep=1e-16,
            # max_memory=2000,
            nroots=1,
        )

        # logger.note(myMF, "   RHF:NewtonRaphson :: # JK computs  (iteration %d) = %d", iteration, tempJK_mo.iter)
        eigenvec = eigenvec / eigenvec[numVars]
        update = np.reshape(eigenvec[:-1], (numPairs, numVirt), order="F")
        xmat = np.zeros([OEI_ao.shape[0], OEI_ao.shape[0]], dtype=float)
        xmat[:numPairs, numPairs:] = -update
        xmat[numPairs:, :numPairs] = update.T
        unitary = scipy.linalg.expm(xmat)
        orbitals = np.dot(orbitals, unitary)
        dm_ao = 2 * np.dot(orbitals[:, :numPairs], orbitals[:, :numPairs].T)
        FOCK_ao = OEI_ao + myJK_ao.getJK_ao(dm_ao)
        FOCK_mo = np.dot(orbitals.T, np.dot(FOCK_ao, orbitals))
        grdnorm = 4 * np.linalg.norm(FOCK_mo[:numPairs, numPairs:])
        energy = myMF.mol.energy_nuc() + 0.5 * np.einsum("ij,ij->", OEI_ao + FOCK_ao, dm_ao)
        logger.note(myMF, "   RHF:NewtonRaphson :: gradient norm (iteration %d) = %1.3g", iteration, grdnorm)
        logger.note(myMF, "   RHF:NewtonRaphson :: RHF energy    (iteration %d) = %1.15g", iteration, energy)

    logger.note(myMF, "RHF:NewtonRaphson :: Convergence reached.")
    logger.note(myMF, "RHF:NewtonRaphson :: Converged RHF energy = %1.15g", energy)

    energies, orbitals = scipy.linalg.eigh(a=FOCK_ao, b=S_ao)

    myMF.mo_coeff = orbitals
    myMF.mo_occ = np.zeros([OEI_ao.shape[0]], dtype=int)
    myMF.mo_occ[:numPairs] = 2
    myMF.mo_energy = energies
    myMF.hf_energy = energy
    myMF.converged = True

    return myMF
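The Davidson call above targets the lowest eigenpair of the augmented orbital-rotation Hessian built by __wrapAugmentedHessian. The eigenvector is rescaled by its last (augmented) component, reshaped into the occupied-virtual step, and exponentiated into a unitary rotation of the orbitals. Schematically, with g the occupied-virtual gradient block (proportional to FOCK_mo[:numPairs, numPairs:]) and H the orbital Hessian:

$$
\begin{pmatrix} \mathbf{H} & \mathbf{g} \\ \mathbf{g}^{T} & 0 \end{pmatrix}
\begin{pmatrix} \mathbf{x} \\ 1 \end{pmatrix}
= \lambda \begin{pmatrix} \mathbf{x} \\ 1 \end{pmatrix},
\qquad
X = \begin{pmatrix} 0 & -\mathbf{x} \\ \mathbf{x}^{T} & 0 \end{pmatrix},
\qquad
C \leftarrow C\, e^{X},
$$

where x corresponds to eigenvec[:-1] / eigenvec[numVars] reshaped to (numPairs, numVirt).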
Example #4
def solve(myMF, dm_guess=None, safe_guess=True):

    assert (hasattr(myMF, 'mol'))
    assert (hasattr(myMF, 'mo_occ'))
    assert (hasattr(myMF, 'mo_coeff'))
    assert (myMF.mol.nelectron % 2 == 0)  # RHF possible

    S_ao = myMF.get_ovlp(myMF.mol)
    OEI_ao = myMF.get_hcore(myMF.mol)
    numPairs = myMF.mol.nelectron // 2
    numVirt = OEI_ao.shape[0] - numPairs
    numVars = numPairs * numVirt

    if (dm_guess is None):
        if ((len(myMF.mo_occ) == 0) or (safe_guess == True)):
            dm_ao = myMF.get_init_guess(key=myMF.init_guess, mol=myMF.mol)
        else:
            dm_ao = np.dot(np.dot(myMF.mo_coeff, np.diag(myMF.mo_occ)),
                           myMF.mo_coeff.T)
    else:
        dm_ao = np.array(dm_guess, copy=True)

    myJK_ao = __JKengine(myMF)
    FOCK_ao = OEI_ao + myJK_ao.getJK_ao(dm_ao)
    energies, orbitals = scipy.linalg.eigh(a=FOCK_ao, b=S_ao)
    dm_ao = 2 * np.dot(orbitals[:, :numPairs], orbitals[:, :numPairs].T)
    FOCK_ao = OEI_ao + myJK_ao.getJK_ao(dm_ao)
    FOCK_mo = np.dot(orbitals.T, np.dot(FOCK_ao, orbitals))
    grdnorm = 4 * np.linalg.norm(FOCK_mo[:numPairs, numPairs:])
    energy = myMF.mol.energy_nuc() + 0.5 * np.einsum('ij,ij->',
                                                     OEI_ao + FOCK_ao, dm_ao)

    logger.note(
        myMF,
        "RHF:NewtonRaphson :: Starting augmented Hessian Newton-Raphson RHF.")

    iteration = 0

    while (grdnorm > 1e-7):

        iteration += 1
        tempJK_mo = __JKengine(myMF, orbitals)
        ini_guess = np.ones([numVars + 1], dtype=float)
        for occ in range(numPairs):
            for virt in range(numVirt):
                ini_guess[occ + numPairs * virt] = -FOCK_mo[occ, numPairs + virt] / max(
                    FOCK_mo[numPairs + virt, numPairs + virt] - FOCK_mo[occ, occ], 1e-6)

        def myprecon(resid, eigval, eigvec):

            myprecon_cutoff = 1e-10
            local_myprecon = np.zeros([numVars + 1], dtype=float)
            for occ in range(numPairs):
                for virt in range(numVirt):
                    denominator = FOCK_mo[numPairs + virt, numPairs + virt] - FOCK_mo[occ, occ] - eigval
                    if (abs(denominator) < myprecon_cutoff):
                        local_myprecon[occ + numPairs * virt] = eigvec[occ + numPairs * virt] / myprecon_cutoff
                    else:
                        # local_myprecon = eigvec / ( diag(H) - eigval ) = K^{-1} u
                        local_myprecon[occ + numPairs * virt] = eigvec[occ + numPairs * virt] / denominator
            if (abs(eigval) < myprecon_cutoff):
                local_myprecon[numVars] = eigvec[numVars] / myprecon_cutoff
            else:
                local_myprecon[numVars] = -eigvec[numVars] / eigval
            # alpha_myprecon = - ( r, K^{-1} u ) / ( u, K^{-1} u )
            alpha_myprecon = -np.einsum('i,i->', local_myprecon, resid) / np.einsum('i,i->', local_myprecon, eigvec)
            # local_myprecon = r - ( r, K^{-1} u ) / ( u, K^{-1} u ) * u
            local_myprecon = resid + alpha_myprecon * eigvec
            for occ in range(numPairs):
                for virt in range(numVirt):
                    denominator = FOCK_mo[numPairs + virt, numPairs + virt] - FOCK_mo[occ, occ] - eigval
                    if (abs(denominator) < myprecon_cutoff):
                        local_myprecon[occ + numPairs * virt] = -local_myprecon[occ + numPairs * virt] / myprecon_cutoff
                    else:
                        local_myprecon[occ + numPairs * virt] = -local_myprecon[occ + numPairs * virt] / denominator
            # Augmented (last) element: its diagonal entry in the augmented Hessian is zero
            if (abs(eigval) < myprecon_cutoff):
                local_myprecon[numVars] = -local_myprecon[numVars] / myprecon_cutoff
            else:
                local_myprecon[numVars] = local_myprecon[numVars] / eigval
            return local_myprecon

        eigenval, eigenvec = linalg_helper.davidson(
            aop=__wrapAugmentedHessian(FOCK_mo, numPairs, numVirt, tempJK_mo),
            x0=ini_guess,
            precond=myprecon,
            # tol=1e-14,
            # max_cycle=50,
            max_space=20,
            # lindep=1e-16,
            # max_memory=2000,
            nroots=1)

        #logger.note(myMF, "   RHF:NewtonRaphson :: # JK computs  (iteration %d) = %d", iteration, tempJK_mo.iter)
        eigenvec = eigenvec / eigenvec[numVars]
        update = np.reshape(eigenvec[:-1], (numPairs, numVirt), order='F')
        xmat = np.zeros([OEI_ao.shape[0], OEI_ao.shape[0]], dtype=float)
        xmat[:numPairs, numPairs:] = -update
        xmat[numPairs:, :numPairs] = update.T
        unitary = scipy.linalg.expm(xmat)
        orbitals = np.dot(orbitals, unitary)
        dm_ao = 2 * np.dot(orbitals[:, :numPairs], orbitals[:, :numPairs].T)
        FOCK_ao = OEI_ao + myJK_ao.getJK_ao(dm_ao)
        FOCK_mo = np.dot(orbitals.T, np.dot(FOCK_ao, orbitals))
        grdnorm = 4 * np.linalg.norm(FOCK_mo[:numPairs, numPairs:])
        energy = myMF.mol.energy_nuc() + 0.5 * np.einsum(
            'ij,ij->', OEI_ao + FOCK_ao, dm_ao)
        logger.note(
            myMF,
            "   RHF:NewtonRaphson :: gradient norm (iteration %d) = %1.3g",
            iteration, grdnorm)
        logger.note(
            myMF,
            "   RHF:NewtonRaphson :: RHF energy    (iteration %d) = %1.15g",
            iteration, energy)

    logger.note(myMF, "RHF:NewtonRaphson :: Convergence reached.")
    logger.note(myMF, "RHF:NewtonRaphson :: Converged RHF energy = %1.15g",
                energy)

    energies, orbitals = scipy.linalg.eigh(a=FOCK_ao, b=S_ao)

    myMF.mo_coeff = orbitals
    myMF.mo_occ = np.zeros([OEI_ao.shape[0]], dtype=int)
    myMF.mo_occ[:numPairs] = 2
    myMF.mo_energy = energies
    myMF.e_tot = energy
    myMF.converged = True

    return myMF
Example #5
    def optimize( self, threshold=1e-6 ):
        r'''Augmented Hessian Newton-Raphson optimization of the localization cost function, using an exact gradient and hessian
        
        Args:
            threshold : The convergence threshold for the orbital rotation gradient
            
        Returns:
            The orbital coefficients of the orthonormal localized orbitals, expressed in terms of the AO
        '''

        # To break up symmetrical orbitals
        flatx = ( 0.0123 / self.numVars ) * np.ones( [ self.numVars ], dtype=float )
        self.__update_unitary( flatx )

        #self.__debug_gradient()
        #self.__debug_hessian_matvec()
        #self.__debug_diag_hessian()

        self.grd_norm = 1.0
        iteration = 0
        max_cf_encountered = 0.0
        logger.debug(self, "Localizer :: At iteration %d the cost function = %1.13f", iteration, -self.__costfunction())
        logger.debug(self, "Localizer :: Linear size of the augmented Hessian = %d", self.numVars+1)

        while ( self.grd_norm > threshold ):

            iteration += 1
            self.__set_gradient() # Sets self.gradient and self.grd_norm
            
            ini_guess = np.zeros( [ self.numVars + 1 ], dtype=float )
            diag_h    = np.zeros( [ self.numVars + 1 ], dtype=float )
            ini_guess[ self.numVars ] = 1.0
            diag_h[ :-1 ] = self.__diag_hessian()
            for elem in range( self.numVars ):
                if ( abs( diag_h[ elem ] ) < 1e-6 ):
                    ini_guess[ elem ] = -self.gradient[ elem ] / 1e-6
                else:
                    ini_guess[ elem ] = -self.gradient[ elem ] / diag_h[ elem ] # Minus the gradient divided by the diagonal elements of the hessian
            if ( self.use_hess == True ):
            
                def myprecon( resid, eigval, eigvec ):
                
                    myprecon_cutoff = 1e-10
                    local_myprecon = np.zeros( [ self.numVars + 1 ], dtype=float )
                    for elem in range( self.numVars + 1 ):
                        if ( abs( diag_h[ elem ] - eigval ) < myprecon_cutoff ):
                            local_myprecon[ elem ] = eigvec[ elem ] / myprecon_cutoff
                        else:
                            # local_myprecon = eigvec / ( diag(H) - eigval ) = K^{-1} u
                            local_myprecon[ elem ] = eigvec[ elem ] / ( diag_h[ elem ] - eigval )
                    # alpha_myprecon = - ( r, K^{-1} u ) / ( u, K^{-1} u )
                    alpha_myprecon = - np.einsum( 'i,i->', local_myprecon, resid ) / np.einsum( 'i,i->', local_myprecon, eigvec )
                    # local_myprecon = r - ( r, K^{-1} u ) / ( u, K^{-1} u ) * u
                    local_myprecon = resid + alpha_myprecon * eigvec
                    for elem in range( self.numVars + 1 ):
                        if ( abs( diag_h[ elem ] - eigval ) < myprecon_cutoff ):
                            local_myprecon[ elem ] = - local_myprecon[ elem ] / myprecon_cutoff
                        else:
                            local_myprecon[ elem ] = - local_myprecon[ elem ] / ( diag_h[ elem ] - eigval )
                    return local_myprecon
                
                #self.ahnr_cnt = 0
                eigenval, eigenvec = linalg_helper.davidson( aop=self.__augmented_hessian_matvec,
                                                             x0=ini_guess,
                                                             precond=myprecon,
                                                             # tol=1e-14,
                                                             # max_cycle=50,
                                                             max_space=20,
                                                             # lindep=1e-16,
                                                             # max_memory=2000,
                                                             nroots=1 )
                                                             
            else:
                eigenvec = np.array( ini_guess, copy=True )
            flatx = eigenvec[ :-1 ] / eigenvec[ self.numVars ]

            update_norm = np.linalg.norm( flatx )
            cost_func_prev = -self.__costfunction()
            self.__update_unitary( flatx )
            cost_func_now = -self.__costfunction()
            counter = 0
            while ( counter < 8 ) and ( cost_func_now < cost_func_prev ):
                logger.debug(self, "Localizer :: Taking half a step back")
                flatx *= 0.5
                self.__update_unitary( -flatx )
                cost_func_now = -self.__costfunction()
                counter += 1
                
            if ( cost_func_now > max_cf_encountered ):
                max_cf_encountered = cost_func_now

            logger.debug(self, "Localizer :: Gradient norm = %g", self.grd_norm)
            logger.debug(self, "Localizer :: Update norm   = %g", update_norm)
            logger.debug(self, "Localizer :: At iteration %d the cost function = %1.13f", iteration, cost_func_now)
            logger.debug(self, "             Diff. with prev. CF = %g", cost_func_now - cost_func_prev )
            logger.debug(self, "             Diff. with max.  CF = %g", cost_func_now - max_cf_encountered )
            
            if ( iteration % 10 == 0 ):
                self.__reorthogonalize()
                cost_func_now = -self.__costfunction()

        logger.note(self, "Localization procedure converged in %d iterations.", iteration)
        
        self.__reorder_orbitals()
        converged_coeff = np.dot( self.coeff, self.u )
        return converged_coeff
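The myprecon routine above (and the analogous one inside solve in Examples #3 and #4) implements the level-shifted, Olsen-style correction spelled out in its comments: with K = diag(H) − λ, first form K⁻¹u, use it to shift the residual so the update stays orthogonal to the current eigenvector in that metric, then divide by the shifted diagonal again:

$$
K = \operatorname{diag}(\mathbf{H}) - \lambda, \qquad
\alpha = -\frac{\langle \mathbf{r},\, K^{-1}\mathbf{u}\rangle}{\langle \mathbf{u},\, K^{-1}\mathbf{u}\rangle}, \qquad
\mathbf{t} = -K^{-1}\,(\mathbf{r} + \alpha\,\mathbf{u}),
$$

with the 1e-10 cutoff guarding against division by near-zero elements of K.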
Example #6
    def optimize(self, threshold=1e-6):
        r'''Augmented Hessian Newton-Raphson optimization of the localization cost function, using an exact gradient and hessian
        
        Args:
            threshold : The convergence threshold for the orbital rotation gradient
            
        Returns:
            The orbital coefficients of the orthonormal localized orbitals, expressed in terms of the AO
        '''

        # To break up symmetrical orbitals
        flatx = (0.0000 / self.numVars) * np.ones([self.numVars], dtype=float)
        self.__update_unitary(flatx)

        #self.__debug_gradient()
        #self.__debug_hessian_matvec()
        #self.__debug_diag_hessian()

        self.grd_norm = 1.0
        iteration = 0
        max_cf_encountered = 0.0
        logger.debug(
            self, "Localizer :: At iteration %d the cost function = %1.13f",
            iteration, -self.__costfunction())
        logger.debug(self,
                     "Localizer :: Linear size of the augmented Hessian = %d",
                     self.numVars + 1)

        while (self.grd_norm > threshold):

            iteration += 1
            self.__set_gradient()  # Sets self.gradient and self.grd_norm

            ini_guess = np.zeros([self.numVars + 1], dtype=float)
            diag_h = np.zeros([self.numVars + 1], dtype=float)
            ini_guess[self.numVars] = 1.0
            diag_h[:-1] = self.__diag_hessian()
            for elem in range(self.numVars):
                if (abs(diag_h[elem]) < 1e-6):
                    ini_guess[elem] = -self.gradient[elem] / 1e-6
                else:
                    # Minus the gradient divided by the diagonal elements of the Hessian
                    ini_guess[elem] = -self.gradient[elem] / diag_h[elem]
            if (self.use_hess == True):

                def myprecon(resid, eigval, eigvec):

                    myprecon_cutoff = 1e-10
                    local_myprecon = np.zeros([self.numVars + 1], dtype=float)
                    for elem in range(self.numVars + 1):
                        if (abs(diag_h[elem] - eigval) < myprecon_cutoff):
                            local_myprecon[elem] = eigvec[elem] / myprecon_cutoff
                        else:
                            # local_myprecon = eigvec / ( diag(H) - eigval ) = K^{-1} u
                            local_myprecon[elem] = eigvec[elem] / (diag_h[elem] - eigval)
                    # alpha_myprecon = - ( r, K^{-1} u ) / ( u, K^{-1} u )
                    alpha_myprecon = -np.einsum('i,i->', local_myprecon, resid) / np.einsum('i,i->', local_myprecon, eigvec)
                    # local_myprecon = r - ( r, K^{-1} u ) / ( u, K^{-1} u ) * u
                    local_myprecon = resid + alpha_myprecon * eigvec
                    for elem in range(self.numVars + 1):
                        if (abs(diag_h[elem] - eigval) < myprecon_cutoff):
                            local_myprecon[elem] = -local_myprecon[elem] / myprecon_cutoff
                        else:
                            local_myprecon[elem] = -local_myprecon[elem] / (diag_h[elem] - eigval)
                    return local_myprecon

                #self.ahnr_cnt = 0
                eigenval, eigenvec = linalg_helper.davidson(
                    aop=self.__augmented_hessian_matvec,
                    x0=ini_guess,
                    precond=myprecon,
                    # tol=1e-14,
                    # max_cycle=50,
                    max_space=20,
                    # lindep=1e-16,
                    # max_memory=2000,
                    nroots=1)

            else:
                eigenvec = np.array(ini_guess, copy=True)
            flatx = eigenvec[:-1] / eigenvec[self.numVars]

            update_norm = np.linalg.norm(flatx)
            cost_func_prev = -self.__costfunction()
            self.__update_unitary(flatx)
            cost_func_now = -self.__costfunction()
            counter = 0
            while (counter < 8) and (cost_func_now < cost_func_prev):
                logger.debug(self, "Localizer :: Taking half a step back")
                flatx *= 0.5
                self.__update_unitary(-flatx)
                cost_func_now = -self.__costfunction()
                counter += 1

            if (cost_func_now > max_cf_encountered):
                max_cf_encountered = cost_func_now

            logger.debug(self, "Localizer :: Gradient norm = %g",
                         self.grd_norm)
            logger.debug(self, "Localizer :: Update norm   = %g", update_norm)
            logger.debug(
                self,
                "Localizer :: At iteration %d the cost function = %1.13f",
                iteration, cost_func_now)
            logger.debug(self, "             Diff. with prev. CF = %g",
                         cost_func_now - cost_func_prev)
            logger.debug(self, "             Diff. with max.  CF = %g",
                         cost_func_now - max_cf_encountered)

            if (iteration % 10 == 0):
                self.__reorthogonalize()
                cost_func_now = -self.__costfunction()

        logger.note(self, "Localization procedure converged in %d iterations.",
                    iteration)

        self.__reorder_orbitals()
        converged_coeff = np.dot(self.coeff, self.u)
        return converged_coeff
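Both localizer versions guard the Newton-Raphson step with the same step-halving fallback: after applying the full update, if the cost function decreased, half of the remaining step is repeatedly undone (up to eight times). A minimal standalone sketch of that pattern, where apply_rotation and cost are hypothetical stand-ins for self.__update_unitary and the negated self.__costfunction:

def damped_step(apply_rotation, cost, flatx, max_halvings=8):
    before = cost()
    apply_rotation(flatx)        # take the full Newton-Raphson step
    now = cost()
    n = 0
    while n < max_halvings and now < before:
        flatx = 0.5 * flatx      # keep only half of the remaining step
        apply_rotation(-flatx)   # walk back by that half
        now = cost()
        n += 1
    return now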
Example #7
e_hoqmo, e_luqmo = gf.as_occupied().e[-1], gf.as_virtual().e[0]
v_hoqmo, v_luqmo = gf.as_occupied().v[:,-1], gf.as_virtual().v[:,0]
print('RAGF2(2,2):')
print('IP = %.6f  weight = %.6f' % (-e_hoqmo, np.linalg.norm(v_hoqmo)))
print('EA = %.6f  weight = %.6f' % (e_luqmo, np.linalg.norm(v_luqmo)))
print('Gap = %.6f' % (e_luqmo - e_hoqmo))

# Method 3: using a more efficient solver:
f_phys = rhf.get_fock(gf2_c.rdm1, basis='mo')
nroots = 5
def aop(x):
    return gf2_c.se.dot(f_phys, x)
def precond(dx, e, x0):
    return dx / (np.concatenate([np.diag(f_phys), gf2_c.se.e]) - e)
def pick(w, v, nroots, callback):
    mask = np.argsort(abs(w - gf2_c.chempot))
    return w[mask], v[:, mask], 0
e, v = davidson(aop, np.eye(nroots, gf2_c.nphys + gf2_c.naux), precond,
                nroots=nroots, max_cycle=10 * nroots, pick=pick)
e_hoqmo, e_luqmo = np.max(e[e < gf2_c.chempot]), np.min(e[e >= gf2_c.chempot])
v_hoqmo, v_luqmo = v[np.argmax(e[e < gf2_c.chempot])], v[np.argmin(e[e >= gf2_c.chempot])]
print('RAGF2(3,3):')
print('IP = %.6f  weight = %.6f' % (-e_hoqmo, np.linalg.norm(v_hoqmo[:rhf.nao])))
print('EA = %.6f  weight = %.6f' % (e_luqmo, np.linalg.norm(v_luqmo[:rhf.nao])))
print('Gap = %.6f' % (e_luqmo - e_hoqmo))

# CCSD:
ccsd = cc.CCSD(rhf)
ccsd.run()
e_ip, v_ip = ccsd._pyscf.ipccsd()[:2]
e_ea, v_ea = ccsd._pyscf.eaccsd()[:2]
print('EOM-CCSD:')
print('IP = %.6f  weight = %.6f' % (e_ip, np.linalg.norm(v_ip[:rhf.nao])))
print('EA = %.6f  weight = %.6f' % (e_ea, np.linalg.norm(v_ea[:rhf.nao])))
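In Method 3 of this last example, aop presumably applies the upfolded matrix coupling the physical Fock block f_phys to the auxiliary (self-energy) poles gf2_c.se.e: its diagonal is exactly the vector used in precond, and the pick callback keeps the nroots eigenvalues closest to the chemical potential gf2_c.chempot. Schematically, with v denoting the physical-auxiliary couplings held by gf2_c.se:

$$
\mathbf{M} =
\begin{pmatrix}
\mathbf{f}_{\mathrm{phys}} & \mathbf{v} \\
\mathbf{v}^{\dagger} & \operatorname{diag}(\boldsymbol{\epsilon}_{\mathrm{aux}})
\end{pmatrix},
\qquad
\operatorname{diag}(\mathbf{M}) =
\big(\operatorname{diag}(\mathbf{f}_{\mathrm{phys}}),\ \boldsymbol{\epsilon}_{\mathrm{aux}}\big).
$$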