Example #1
File: fdpw.py, Project: qsnake/gpaw
    def initialize_wave_functions_from_basis_functions(self,
                                                       basis_functions,
                                                       density, hamiltonian,
                                                       spos_ac):
        if 0:
            self.timer.start('Random wavefunction initialization')
            for kpt in self.kpt_u:
                kpt.psit_nG = self.gd.zeros(self.mynbands, self.dtype)
                if extra_parameters.get('sic'):
                    kpt.W_nn = np.zeros((self.nbands, self.nbands),
                                        dtype=self.dtype)
            self.random_wave_functions(0)
            self.timer.stop('Random wavefunction initialization')
            return

        self.timer.start('LCAO initialization')
        lcaoksl, lcaobd = self.initksl, self.initksl.bd
        lcaowfs = LCAOWaveFunctions(lcaoksl, self.gd, self.nvalence,
                                    self.setups, lcaobd, self.dtype,
                                    self.world, self.kd)
        lcaowfs.basis_functions = basis_functions
        lcaowfs.timer = self.timer
        self.timer.start('Set positions (LCAO WFS)')
        lcaowfs.set_positions(spos_ac)
        self.timer.stop('Set positions (LCAO WFS)')

        eigensolver = get_eigensolver('lcao', 'lcao')
        eigensolver.initialize(self.gd, self.dtype, self.setups.nao, lcaoksl)

        # XXX when density matrix is properly distributed, be sure to
        # update the density here also
        eigensolver.iterate(hamiltonian, lcaowfs)

        # Transfer coefficients ...
        for kpt, lcaokpt in zip(self.kpt_u, lcaowfs.kpt_u):
            kpt.C_nM = lcaokpt.C_nM

        # and get rid of potentially big arrays early:
        del eigensolver, lcaowfs

        self.timer.start('LCAO to grid')
        for kpt in self.kpt_u:
            kpt.psit_nG = self.gd.zeros(self.mynbands, self.dtype)
            if extra_parameters.get('sic'):
                kpt.W_nn = np.zeros((self.nbands, self.nbands),
                                    dtype=self.dtype)
            basis_functions.lcao_to_grid(kpt.C_nM, 
                                         kpt.psit_nG[:lcaobd.mynbands], kpt.q)
            kpt.C_nM = None
        self.timer.stop('LCAO to grid')

        if self.mynbands > lcaobd.mynbands:
            # Add extra states.  If the number of atomic orbitals is
            # less than the desired number of bands, then extra random
            # wave functions are added.
            self.random_wave_functions(lcaobd.mynbands)
        self.timer.stop('LCAO initialization')
Example #2
File: fdpw.py, Project: yihsuanliu/gpaw
    def initialize_wave_functions_from_basis_functions(self, basis_functions,
                                                       density, hamiltonian,
                                                       spos_ac):
        if 0:
            self.timer.start('Random wavefunction initialization')
            for kpt in self.kpt_u:
                kpt.psit_nG = self.gd.zeros(self.mynbands, self.dtype)
                if extra_parameters.get('sic'):
                    kpt.W_nn = np.zeros((self.nbands, self.nbands),
                                        dtype=self.dtype)
            self.random_wave_functions(0)
            self.timer.stop('Random wavefunction initialization')
            return

        self.timer.start('LCAO initialization')
        lcaoksl, lcaobd = self.initksl, self.initksl.bd
        lcaowfs = LCAOWaveFunctions(lcaoksl, self.gd, self.nvalence,
                                    self.setups, lcaobd, self.dtype,
                                    self.world, self.kd)
        lcaowfs.basis_functions = basis_functions
        lcaowfs.timer = self.timer
        self.timer.start('Set positions (LCAO WFS)')
        lcaowfs.set_positions(spos_ac)
        self.timer.stop('Set positions (LCAO WFS)')

        eigensolver = get_eigensolver('lcao', 'lcao')
        eigensolver.initialize(self.gd, self.dtype, self.setups.nao, lcaoksl)

        # XXX when density matrix is properly distributed, be sure to
        # update the density here also
        eigensolver.iterate(hamiltonian, lcaowfs)

        # Transfer coefficients ...
        for kpt, lcaokpt in zip(self.kpt_u, lcaowfs.kpt_u):
            kpt.C_nM = lcaokpt.C_nM

        # and get rid of potentially big arrays early:
        del eigensolver, lcaowfs

        self.timer.start('LCAO to grid')
        for kpt in self.kpt_u:
            kpt.psit_nG = self.gd.zeros(self.mynbands, self.dtype)
            if extra_parameters.get('sic'):
                kpt.W_nn = np.zeros((self.nbands, self.nbands),
                                    dtype=self.dtype)
            basis_functions.lcao_to_grid(kpt.C_nM,
                                         kpt.psit_nG[:lcaobd.mynbands], kpt.q)
            kpt.C_nM = None
        self.timer.stop('LCAO to grid')

        if self.mynbands > lcaobd.mynbands:
            # Add extra states.  If the number of atomic orbitals is
            # less than the desired number of bands, then extra random
            # wave functions are added.
            self.random_wave_functions(lcaobd.mynbands)
        self.timer.stop('LCAO initialization')
Example #3
def Laplace(gd, scale=1.0, n=1, dtype=float, allocate=True):
    if n == 9:
        return FTLaplace(gd, scale, dtype)
    if extra_parameters.get('newgucstencil', True):
        return NewGUCLaplace(gd, scale, n, dtype, allocate)
    else:
        return GUCLaplace(gd, scale, n, dtype, allocate)
Example #4
File: fd_operators.py, Project: qsnake/gpaw
def Laplace(gd, scale=1.0, n=1, dtype=float, allocate=True):
    if n == 9:
        return FTLaplace(gd, scale, dtype)
    if extra_parameters.get('newgucstencil', True):
        return NewGUCLaplace(gd, scale, n, dtype, allocate)
    else:
        return GUCLaplace(gd, scale, n, dtype, allocate)
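Examples #3 and #4 show the same Laplace factory from fd_operators.py: n == 9 selects the FFT-based FTLaplace, otherwise the 'newgucstencil' extra parameter picks NewGUCLaplace over GUCLaplace. Below is a minimal usage sketch, not taken from any of the projects above; the grid size, cell and stencil order are made up.

from gpaw import extra_parameters
from gpaw.grid_descriptor import GridDescriptor
from gpaw.fd_operators import Laplace

extra_parameters['newgucstencil'] = True             # select the new GUC stencil
gd = GridDescriptor((24, 24, 24), (4.0, 4.0, 4.0))   # 24^3 points in a 4x4x4 Bohr cell
lap = Laplace(gd, scale=-0.5, n=3, dtype=float)      # -(1/2) nabla^2, 3rd-order stencil
psit_G = gd.zeros()
out_G = gd.zeros()
lap.apply(psit_G, out_G)                             # out_G <- -(1/2) nabla^2 psit_G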
Example #5
    def print_chi(self, pd):
        calc = self.calc
        gd = calc.wfs.gd

        if extra_parameters.get('df_dry_run'):
            from gpaw.mpi import DryRunCommunicator
            size = extra_parameters['df_dry_run']
            world = DryRunCommunicator(size)
        else:
            world = self.world

        q_c = pd.kd.bzk_kc[0]
        nw = len(self.omega_w)
        ecut = self.ecut * Hartree
        ns = calc.wfs.nspins
        nbands = self.nbands
        nk = calc.wfs.kd.nbzkpts
        nik = calc.wfs.kd.nibzkpts
        ngmax = pd.ngmax
        eta = self.eta * Hartree
        wsize = world.size
        knsize = self.kncomm.size
        nocc = self.nocc1
        npocc = self.nocc2
        ngridpoints = gd.N_c[0] * gd.N_c[1] * gd.N_c[2]
        nstat = (ns * npocc + world.size - 1) // world.size
        occsize = nstat * ngridpoints * 16. / 1024**2
        bsize = self.blockcomm.size
        chisize = nw * pd.ngmax**2 * 16. / 1024**2 / bsize

        p = partial(print, file=self.fd)

        p('%s' % ctime())
        p('Called response.chi0.calculate with')
        p('    q_c: [%f, %f, %f]' % (q_c[0], q_c[1], q_c[2]))
        p('    Number of frequency points: %d' % nw)
        p('    Planewave cutoff: %f' % ecut)
        p('    Number of spins: %d' % ns)
        p('    Number of bands: %d' % nbands)
        p('    Number of kpoints: %d' % nk)
        p('    Number of irreducible kpoints: %d' % nik)
        p('    Number of planewaves: %d' % ngmax)
        p('    Broadening (eta): %f' % eta)
        p('    world.size: %d' % wsize)
        p('    kncomm.size: %d' % knsize)
        p('    blockcomm.size: %d' % bsize)
        p('    Number of completely occupied states: %d' % nocc)
        p('    Number of partially occupied states: %d' % npocc)
        p()
        p('    Memory estimate of potentially large arrays:')
        p('        chi0_wGG: %f M / cpu' % chisize)
        p('        Occupied states: %f M / cpu' % occsize)
        p('        Memory usage before allocation: %f M / cpu' %
          (maxrss() / 1024**2))
        p()
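The memory estimates printed above follow directly from array shapes: chi0_wGG holds nw * ngmax**2 complex numbers (16 bytes each) divided over blockcomm.size ranks, and the occupied states take nstat * ngridpoints * 16 bytes per rank. A made-up numeric illustration (the sizes below are hypothetical, not from any calculation):

nw, ngmax, bsize = 200, 400, 4                      # hypothetical sizes
chisize = nw * ngmax**2 * 16. / 1024**2 / bsize     # MiB per rank
print('chi0_wGG: %.1f M / cpu' % chisize)           # -> chi0_wGG: 122.1 M / cpu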
Example #6
    def calculate(self, q_c, spin='all', A_x=None):
        wfs = self.calc.wfs

        if spin == 'all':
            spins = range(wfs.nspins)
        else:
            assert spin in range(wfs.nspins)
            spins = [spin]

        q_c = np.asarray(q_c, dtype=float)
        qd = KPointDescriptor([q_c])
        pd = PWDescriptor(self.ecut, wfs.gd, complex, qd)

        self.print_chi(pd)

        if extra_parameters.get('df_dry_run'):
            print('    Dry run exit', file=self.fd)
            raise SystemExit

        nG = pd.ngmax
        nw = len(self.omega_w)
        mynG = (nG + self.blockcomm.size - 1) // self.blockcomm.size
        self.Ga = self.blockcomm.rank * mynG
        self.Gb = min(self.Ga + mynG, nG)
        assert mynG * (self.blockcomm.size - 1) < nG

        if A_x is not None:
            nx = nw * (self.Gb - self.Ga) * nG
            chi0_wGG = A_x[:nx].reshape((nw, self.Gb - self.Ga, nG))
            chi0_wGG[:] = 0.0
        else:
            chi0_wGG = np.zeros((nw, self.Gb - self.Ga, nG), complex)

        if np.allclose(q_c, 0.0):
            chi0_wxvG = np.zeros((len(self.omega_w), 2, 3, nG), complex)
            chi0_wvv = np.zeros((len(self.omega_w), 3, 3), complex)
            self.chi0_vv = np.zeros((3, 3), complex)
        else:
            chi0_wxvG = None
            chi0_wvv = None

        print('Initializing PAW Corrections', file=self.fd)
        self.Q_aGii = self.initialize_paw_corrections(pd)

        # Do all empty bands:
        m1 = self.nocc1
        m2 = self.nbands

        self._calculate(pd, chi0_wGG, chi0_wxvG, chi0_wvv, self.Q_aGii, m1, m2,
                        spins)

        return pd, chi0_wGG, chi0_wxvG, chi0_wvv
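The G-vector rows of chi0_wGG are block-distributed over blockcomm with a ceiling division; every rank except possibly the last owns mynG rows. A small standalone illustration with made-up sizes:

nG, size = 10, 3
mynG = (nG + size - 1) // size                  # ceiling division -> 4
for rank in range(size):
    Ga = rank * mynG
    Gb = min(Ga + mynG, nG)
    print(rank, Ga, Gb)                         # (0, 0, 4), (1, 4, 8), (2, 8, 10)
assert mynG * (size - 1) < nG                   # same sanity check as in the examples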
Example #7
    def calculate(self, q_c, spin='all', A_x=None):
        wfs = self.calc.wfs

        if spin == 'all':
            spins = range(wfs.nspins)
        else:
            assert spin in range(wfs.nspins)
            spins = [spin]

        q_c = np.asarray(q_c, dtype=float)
        qd = KPointDescriptor([q_c])
        pd = PWDescriptor(self.ecut, wfs.gd, complex, qd)

        self.print_chi(pd)

        if extra_parameters.get('df_dry_run'):
            print('    Dry run exit', file=self.fd)
            raise SystemExit

        nG = pd.ngmax
        nw = len(self.omega_w)
        mynG = (nG + self.blockcomm.size - 1) // self.blockcomm.size
        self.Ga = self.blockcomm.rank * mynG
        self.Gb = min(self.Ga + mynG, nG)
        assert mynG * (self.blockcomm.size - 1) < nG
        
        if A_x is not None:
            nx = nw * (self.Gb - self.Ga) * nG
            chi0_wGG = A_x[:nx].reshape((nw, self.Gb - self.Ga, nG))
            chi0_wGG[:] = 0.0
        else:
            chi0_wGG = np.zeros((nw, self.Gb - self.Ga, nG), complex)

        if np.allclose(q_c, 0.0):
            chi0_wxvG = np.zeros((len(self.omega_w), 2, 3, nG), complex)
            chi0_wvv = np.zeros((len(self.omega_w), 3, 3), complex)
            self.chi0_vv = np.zeros((3, 3), complex)
        else:
            chi0_wxvG = None
            chi0_wvv = None

        print('Initializing PAW Corrections', file=self.fd)
        self.Q_aGii = self.initialize_paw_corrections(pd)

        # Do all empty bands:
        m1 = self.nocc1
        m2 = self.nbands
        
        self._calculate(pd, chi0_wGG, chi0_wxvG, chi0_wvv, self.Q_aGii,
                        m1, m2, spins)
        
        return pd, chi0_wGG, chi0_wxvG, chi0_wvv
Example #8
File: chi0.py, Project: robwarm/gpaw-symm
    def print_chi(self, pd):
        calc = self.calc
        gd = calc.wfs.gd

        ns = calc.wfs.nspins
        nk = calc.wfs.kd.nbzkpts
        nb = self.nocc2

        if extra_parameters.get("df_dry_run"):
            from gpaw.mpi import DryRunCommunicator

            size = extra_parameters["df_dry_run"]
            world = DryRunCommunicator(size)
        else:
            world = self.world

        nw = len(self.omega_w)
        q_c = pd.kd.bzk_kc[0]
        nstat = (ns * nk * nb + world.size - 1) // world.size

        print("%s" % ctime(), file=self.fd)
        print("Called response.chi0.calculate with", file=self.fd)
        print("    q_c: [%f, %f, %f]" % (q_c[0], q_c[1], q_c[2]), file=self.fd)
        print("    Number of frequency points   : %d" % nw, file=self.fd)
        print("    Planewave cutoff: %f" % (self.ecut * Hartree), file=self.fd)
        print("    Number of spins: %d" % ns, file=self.fd)
        print("    Number of bands: %d" % self.nbands, file=self.fd)
        print("    Number of kpoints: %d" % nk, file=self.fd)
        print("    Number of planewaves: %d" % pd.ngmax, file=self.fd)
        print("    Broadening (eta): %f" % (self.eta * Hartree), file=self.fd)
        print("    Keep occupied states: %s" % self.keep_occupied_states, file=self.fd)

        print("", file=self.fd)
        print("    Related to parallelization", file=self.fd)
        print("        world.size: %d" % world.size, file=self.fd)
        print("        Number of completely occupied states: %d" % self.nocc1, file=self.fd)
        print("        Number of partially occupied states: %d" % self.nocc2, file=self.fd)
        print("        Number of terms handled in chi-sum by each rank: %d" % nstat, file=self.fd)

        print("", file=self.fd)
        print("    Memory estimate:", file=self.fd)
        print("        chi0_wGG: %f M / cpu" % (nw * pd.ngmax ** 2 * 16.0 / 1024 ** 2), file=self.fd)
        print(
            "        Occupied states: %f M / cpu" % (nstat * gd.N_c[0] * gd.N_c[1] * gd.N_c[2] * 16.0 / 1024 ** 2),
            file=self.fd,
        )
        print("        Max mem sofar   : %f M / cpu" % (maxrss() / 1024 ** 2), file=self.fd)

        print("", file=self.fd)
Example #9
 def endElement(self, name):
     setup = self.setup
     if self.data is None:
         return
     x_g = np.array([float(x) for x in ''.join(self.data).split()])
     if name == 'ae_core_density':
         setup.nc_g = x_g
     elif name == 'pseudo_core_density':
         setup.nct_g = x_g
     elif name == 'kinetic_energy_differences':
         setup.e_kin_jj = x_g
     elif name == 'ae_core_kinetic_energy_density':
         setup.tauc_g = x_g
     elif name == 'pseudo_valence_density':
         setup.nvt_g = x_g
     elif name == 'pseudo_core_kinetic_energy_density':
         if extra_parameters.get('mggapscore') and (x_g == 0).all():
             x = setup.rgd.r_g / 0.7
             x_g = 0.051 * (1 - x**2 * (3 - 2 * x))
             x_g[x > 1] = 0.0
         setup.tauct_g = x_g
     elif name in ['localized_potential', 'zero_potential']:  # XXX
         setup.vbar_g = x_g
     elif name.startswith('GLLB_'):
         # Add setup tags starting with GLLB_ to extra_xc_data. Remove
         # GLLB_ from front of string:
         setup.extra_xc_data[name[5:]] = x_g
     elif name == 'ae_partial_wave':
         j = len(setup.phi_jg)
         assert self.id == setup.id_j[j]
         setup.phi_jg.append(x_g)
     elif name == 'pseudo_partial_wave':
         j = len(setup.phit_jg)
         assert self.id == setup.id_j[j]
         setup.phit_jg.append(x_g)
     elif name == 'projector_function':
         j = len(setup.pt_jg)
         assert self.id == setup.id_j[j]
         setup.pt_jg.append(x_g)
     elif name == 'exact_exchange_X_matrix':
         setup.X_p = x_g
     elif name == 'yukawa_exchange_X_matrix':
         setup.X_pg = x_g
     elif name == 'core_hole_state':
         setup.phicorehole_g = x_g
Example #10
File: fdpw.py, Project: eojons/gpaw-scme
    def initialize_wave_functions_from_restart_file(self):
        if not isinstance(self.kpt_u[0].psit_nG, FileReference):
            return

        # Calculation started from a restart file.  Copy data
        # from the file to memory:
        for kpt in self.kpt_u:
            file_nG = kpt.psit_nG
            kpt.psit_nG = self.empty(self.bd.mynbands, q=kpt.q)
            if extra_parameters.get("sic"):
                kpt.W_nn = np.zeros((self.bd.nbands, self.bd.nbands), dtype=self.dtype)
            # Read band by band to save memory
            for n, psit_G in enumerate(kpt.psit_nG):
                if self.gd.comm.rank == 0:
                    big_psit_G = file_nG[n][:].astype(psit_G.dtype)
                else:
                    big_psit_G = None
                self.gd.distribute(big_psit_G, psit_G)
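The band-by-band loop above reads one global array on gd.comm rank 0 and scatters it to the distributed grid with gd.distribute(). A minimal standalone sketch of that pattern (the grid dimensions are made up and a zero array stands in for data read from the restart file):

import numpy as np
from gpaw.grid_descriptor import GridDescriptor

gd = GridDescriptor((16, 16, 16), (4.0, 4.0, 4.0))
psit_G = gd.empty()                                        # local portion on this rank
if gd.comm.rank == 0:
    big_psit_G = np.zeros(gd.get_size_of_global_array())   # stands in for file_nG[n][:]
else:
    big_psit_G = None
gd.distribute(big_psit_G, psit_G)                          # scatter global array -> local parts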
Example #11
    def __init__(self, cell_cv, pbc_c, setups, ibzk_qc, gamma):
        self.cell_cv = cell_cv
        self.pbc_c = pbc_c
        self.ibzk_qc = ibzk_qc
        self.gamma = gamma

        cutoff_I = []
        setups_I = setups.setups.values()
        I_setup = {}
        for I, setup in enumerate(setups_I):
            I_setup[setup] = I
            cutoff_I.append(max([func.get_cutoff()
                                 for func in setup.phit_j + setup.pt_j]))
        
        I_a = []
        for setup in setups:
            I_a.append(I_setup[setup])

        cutoff_a = [cutoff_I[I] for I in I_a]

        self.I_a = I_a
        self.setups_I = setups_I        
        self.atompairs = PairsWithSelfinteraction(NeighborPairs(cutoff_a,
                                                                cell_cv,
                                                                pbc_c,
                                                                True))
        self.atoms = self.atompairs.pairs.atoms # XXX compatibility

        scale = 0.01 # XXX minimal distance scale
        cutoff_close_a = [covalent_radii[s.Z] / Bohr * scale for s in setups]
        self.atoms_close = NeighborPairs(cutoff_close_a, cell_cv, pbc_c, False)

        rcmax = max(cutoff_I + [0.001])

        ng = 2**extra_parameters.get('log2ng', 10)
        transformer = FourierTransformer(rcmax, ng)
        tsoc = TwoSiteOverlapCalculator(transformer)
        self.msoc = ManySiteOverlapCalculator(tsoc, I_a, I_a)
        self.calculate_expansions()

        self.calculate = self.evaluate # XXX compatibility

        self.set_matrix_distribution(None, None)
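The number of radial points used by the FourierTransformer above is 2**log2ng, so the 'log2ng' extra parameter defaults to 1024 points. A tiny illustration (the override shown is hypothetical):

from gpaw import extra_parameters

ng = 2**extra_parameters.get('log2ng', 10)
print(ng)   # 1024 by default; e.g. log2ng=11 would give 2048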
Example #12
File: setup.py, Project: qsnake/gpaw
    def get_derivative_integrals(self, rgd, phi_jg, phit_jg):
        """Calculate PAW-correction matrix elements of nabla.

        ::
        
          /  _       _  d       _     ~   _  d   ~   _
          | dr [phi (r) -- phi (r) - phi (r) -- phi (r)]
          /        1    dx    2         1    dx    2

        and similar for y and z."""

        if extra_parameters.get('fprojectors'):
            return None
        
        r_g = rgd.r_g
        dr_g = rgd.dr_g
        nabla_iiv = np.empty((self.ni, self.ni, 3))
        i1 = 0
        for j1 in range(self.nj):
            l1 = self.l_j[j1]
            nm1 = 2 * l1 + 1
            i2 = 0
            for j2 in range(self.nj):
                l2 = self.l_j[j2]
                nm2 = 2 * l2 + 1
                f1f2or = np.dot(phi_jg[j1] * phi_jg[j2] -
                                phit_jg[j1] * phit_jg[j2], r_g * dr_g)
                dphidr_g = np.empty_like(phi_jg[j2])
                rgd.derivative(phi_jg[j2], dphidr_g)
                dphitdr_g = np.empty_like(phit_jg[j2])
                rgd.derivative(phit_jg[j2], dphitdr_g)
                f1df2dr = np.dot(phi_jg[j1] * dphidr_g -
                                 phit_jg[j1] * dphitdr_g, r_g**2 * dr_g)
                for v in range(3):
                    Lv = 1 + (v + 2) % 3
                    nabla_iiv[i1:i1 + nm1, i2:i2 + nm2, v] = (
                        (4 * pi / 3)**0.5 * (f1df2dr - l2 * f1f2or) *
                        G_LLL[Lv, l2**2:l2**2 + nm2, l1**2:l1**2 + nm1].T +
                        f1f2or *
                        Y_LLv[l1**2:l1**2 + nm1, l2**2:l2**2 + nm2, v])
                i2 += nm2
            i1 += nm1
        return nabla_iiv
Example #13
File: chi0.py, Project: robwarm/gpaw-symm
    def calculate(self, q_c, spin="all"):
        wfs = self.calc.wfs

        if spin == "all":
            spins = range(wfs.nspins)
        else:
            assert spin in range(wfs.nspins)
            spins = [spin]

        q_c = np.asarray(q_c, dtype=float)
        qd = KPointDescriptor([q_c])
        pd = PWDescriptor(self.ecut, wfs.gd, complex, qd)

        self.print_chi(pd)

        if extra_parameters.get("df_dry_run"):
            print("    Dry run exit", file=self.fd)
            raise SystemExit

        nG = pd.ngmax
        nw = len(self.omega_w)
        mynG = (nG + self.blockcomm.size - 1) // self.blockcomm.size
        chi0_wGG = np.zeros((nw, mynG, nG), complex)
        self.Ga = self.blockcomm.rank * mynG
        self.Gb = min(self.Ga + mynG, nG)
        chi0_wGG = chi0_wGG[:, : self.Gb - self.Ga]

        if np.allclose(q_c, 0.0):
            chi0_wxvG = np.zeros((len(self.omega_w), 2, 3, nG), complex)
            chi0_wvv = np.zeros((len(self.omega_w), 3, 3), complex)
            self.chi0_vv = np.zeros((3, 3), complex)
        else:
            chi0_wxvG = None
            chi0_wvv = None

        print("    Initializing PAW Corrections", file=self.fd)
        self.Q_aGii = self.initialize_paw_corrections(pd)
        print("        Done", file=self.fd)

        # Do all empty bands:
        m1 = self.nocc1
        m2 = self.nbands
        return self._calculate(pd, chi0_wGG, chi0_wxvG, chi0_wvv, self.Q_aGii, m1, m2, spins)
Example #14
File: fdpw.py, Project: yihsuanliu/gpaw
    def initialize_wave_functions_from_restart_file(self):
        if not isinstance(self.kpt_u[0].psit_nG, TarFileReference):
            return

        # Calculation started from a restart file.  Copy data
        # from the file to memory:
        for kpt in self.kpt_u:
            file_nG = kpt.psit_nG
            kpt.psit_nG = self.gd.empty(self.mynbands, self.dtype)
            if extra_parameters.get('sic'):
                kpt.W_nn = np.zeros((self.nbands, self.nbands),
                                    dtype=self.dtype)
            # Read band by band to save memory
            for n, psit_G in enumerate(kpt.psit_nG):
                if self.gd.comm.rank == 0:
                    big_psit_G = np.array(file_nG[n][:], self.dtype)
                else:
                    big_psit_G = None
                self.gd.distribute(big_psit_G, psit_G)
Example #15
File: cg.py, Project: robwarm/gpaw-symm
    def __init__(self, niter=4, rtol=0.30):
        """Construct conjugate gradient eigen solver.

        parameters:

        niter: int
            Maximum number of conjugate gradient iterations per band
        rtol: float
            If change in residual is less than rtol, iteration for band is
            not continued

        """
        Eigensolver.__init__(self)
        self.niter = niter
        self.rtol = rtol
        if extra_parameters.get('PK', False):
            self.orthonormalization_required = True
        else:
            self.orthonormalization_required = False
Example #16
    def __init__(self, niter=4, rtol=0.30):
        """Construct conjugate gradient eigen solver.

        parameters:

        niter: int
            Maximum number of conjugate gradient iterations per band
        rtol: float
            If change in residual is less than rtol, iteration for band is
            not continued

        """
        Eigensolver.__init__(self)
        self.niter = niter
        self.rtol = rtol
        if extra_parameters.get('PK', False):
            self.orthonormalization_required = True
        else:
            self.orthonormalization_required = False
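Examples #15 and #16 show the same CG constructor from cg.py: the 'PK' extra parameter switches on explicit orthonormalization after each iteration. A minimal sketch of turning it on, assuming the surrounding class is gpaw.eigensolvers.cg.CG as the file name suggests:

from gpaw import extra_parameters
from gpaw.eigensolvers.cg import CG   # assumed module path for the class above

extra_parameters['PK'] = True
solver = CG(niter=4, rtol=0.30)
assert solver.orthonormalization_required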
Example #17
File: overlap.py, Project: thonmaker/gpaw
    def __init__(self, cell_cv, pbc_c, setups, ibzk_qc, gamma):
        self.cell_cv = cell_cv
        self.pbc_c = pbc_c
        self.ibzk_qc = ibzk_qc
        self.gamma = gamma

        timer.start('tci init')
        cutoff_I = []
        setups_I = setups.setups.values()
        I_setup = {}
        for I, setup in enumerate(setups_I):
            I_setup[setup] = I
            cutoff_I.append(
                max([func.get_cutoff() for func in setup.phit_j + setup.pt_j]))

        I_a = []
        for setup in setups:
            I_a.append(I_setup[setup])

        cutoff_a = [cutoff_I[I] for I in I_a]

        self.cutoff_a = cutoff_a  # convenient for writing the new new overlap
        self.I_a = I_a
        self.setups_I = setups_I
        self.atompairs = PairsWithSelfinteraction(
            NeighborPairs(cutoff_a, cell_cv, pbc_c, True))

        scale = 0.01  # XXX minimal distance scale
        cutoff_close_a = [covalent_radii[s.Z] / Bohr * scale for s in setups]
        self.atoms_close = NeighborPairs(cutoff_close_a, cell_cv, pbc_c, False)

        rcmax = max(cutoff_I + [0.001])

        ng = 2**extra_parameters.get('log2ng', 10)
        transformer = FourierTransformer(rcmax, ng)
        tsoc = TwoSiteOverlapCalculator(transformer)
        self.msoc = ManySiteOverlapCalculator(tsoc, I_a, I_a)
        self.calculate_expansions()

        self.calculate = self.evaluate  # XXX compatibility

        self.set_matrix_distribution(None, None)
        timer.stop('tci init')
Example #18
    def iterate_one_k_point(self, hamiltonian, wfs, kpt):
        """Do a single RMM-DIIS iteration for the kpoint"""

        psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)

        self.timer.start('RMM-DIIS')
        if self.keep_htpsit:
            self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
                                     kpt.P_ani, kpt.eps_n, R_nG)

        def integrate(a_G, b_G):
            return np.real(wfs.integrate(a_G, b_G, global_integral=False))

        comm = wfs.gd.comm
        B = self.blocksize
        dR_xG = wfs.empty(B, q=kpt.q)
        P_axi = wfs.pt.dict(B)
        error = 0.0
        for n1 in range(0, wfs.bd.mynbands, B):
            n2 = n1 + B
            if n2 > wfs.bd.mynbands:
                n2 = wfs.bd.mynbands
                B = n2 - n1
                P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
                dR_xG = dR_xG[:B]
                
            n_x = range(n1, n2)
            psit_xG = psit_nG[n1:n2]
            
            if self.keep_htpsit:
                R_xG = R_nG[n1:n2]
            else:
                R_xG = wfs.empty(B, q=kpt.q)
                wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
                wfs.pt.integrate(psit_xG, P_axi, kpt.q)
                self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
                                         P_axi, kpt.eps_n[n_x], R_xG, n_x)

            for n in n_x:
                if kpt.f_n is None:
                    weight = kpt.weight
                else:
                    weight = kpt.f_n[n]
                if self.nbands_converge != 'occupied':
                    if wfs.bd.global_index(n) < self.nbands_converge:
                        weight = kpt.weight
                    else:
                        weight = 0.0
                error += weight * integrate(R_xG[n - n1], R_xG[n - n1])

            # Precondition the residual:
            self.timer.start('precondition')
            ekin_x = self.preconditioner.calculate_kinetic_energy(
                psit_xG, kpt)
            dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
            self.timer.stop('precondition')

            # Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
            wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
            self.timer.start('projections')
            wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
            self.timer.stop('projections')
            self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
                                     P_axi, kpt.eps_n[n_x], dR_xG, n_x,
                                     calculate_change=True)
            
            # Find lam that minimizes the norm of R'_G = R_G + lam dR_G
            RdR_x = np.array([integrate(dR_G, R_G)
                              for R_G, dR_G in zip(R_xG, dR_xG)])
            dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
            comm.sum(RdR_x)
            comm.sum(dRdR_x)

            lam_x = -RdR_x / dRdR_x
            if extra_parameters.get('PK', False):
                lam_x[:] = np.where(lam_x>0.0, lam_x, 0.2)   
            # Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
            #                      = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
            for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
                if self.fixed_trial_step is None:
                    lam2 = lam
                else:
                    lam2 = self.fixed_trial_step
                R_G *= lam + lam2
                axpy(lam * lam2, dR_G, R_G)
                
            self.timer.start('precondition')
            psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
            self.timer.stop('precondition')
            
        self.timer.stop('RMM-DIIS')
        error = comm.sum(error)
        return error, psit_nG
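In the RMM-DIIS step above, the 'PK' extra parameter clamps negative trial step lengths to 0.2 before the wave functions are updated. A small numeric illustration with made-up values:

import numpy as np

lam_x = np.array([-0.6, 0.1, 1.3])
lam_x[:] = np.where(lam_x > 0.0, lam_x, 0.2)
print(lam_x)   # [0.2 0.1 1.3]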
Example #19
    def __init__(self, data, xc, lmax=0, basis=None, filter=None):
        self.type = data.name
        
        self.HubU = None

        if max(data.l_j) > 2 and not extra_parameters.get('fprojectors'):
            msg = ('Your %s dataset has f-projectors!  ' % data.symbol +
                   'Add --gpaw=fprojectors=1 on the command-line.')
            raise RuntimeError(msg)
            
        if not data.is_compatible(xc):
            raise ValueError('Cannot use %s setup with %s functional' %
                             (data.setupname, xc.get_setup_name()))
        
        self.symbol = data.symbol
        self.data = data

        self.Nc = data.Nc
        self.Nv = data.Nv
        self.Z = data.Z
        l_j = self.l_j = data.l_j
        self.l_orb_j = data.l_orb_j
        n_j = self.n_j = data.n_j
        self.f_j = data.f_j
        self.eps_j = data.eps_j
        nj = self.nj = len(l_j)
        rcut_j = self.rcut_j = data.rcut_j

        self.ExxC = data.ExxC
        self.X_p = data.X_p

        self.orbital_free = data.orbital_free
        
        pt_jg = data.pt_jg
        phit_jg = data.phit_jg
        phi_jg = data.phi_jg

        self.fingerprint = data.fingerprint
        self.filename = data.filename

        rgd = self.rgd = data.rgd
        r_g = rgd.r_g
        dr_g = rgd.dr_g

        self.lmax = lmax
            
        rcutmax = max(rcut_j)
        rcut2 = 2 * rcutmax
        gcut2 = rgd.ceil(rcut2)
        self.gcut2 = gcut2

        self.gcutmin = rgd.ceil(min(rcut_j))

        if data.generator_version < 2:
            # Find Fourier-filter cutoff radius:
            gcutfilter = data.get_max_projector_cutoff()
        elif filter:
            rc = rcutmax
            filter(rgd, rc, data.vbar_g)

            for l, pt_g in zip(l_j, pt_jg):
                filter(rgd, rc, pt_g, l)

            for l in range(max(l_j) + 1):
                J = [j for j, lj in enumerate(l_j) if lj == l]
                A_nn = [[rgd.integrate(phit_jg[j1] * pt_jg[j2]) / 4 / pi
                         for j1 in J] for j2 in J]
                B_nn = np.linalg.inv(A_nn)
                pt_ng = np.dot(B_nn, [pt_jg[j] for j in J])
                for n, j in enumerate(J):
                    pt_jg[j] = pt_ng[n]
            gcutfilter = data.get_max_projector_cutoff()
        else:
            rcutfilter = max(rcut_j)
            gcutfilter = rgd.ceil(rcutfilter)
        
        self.rcutfilter = rcutfilter = r_g[gcutfilter]
        assert (data.vbar_g[gcutfilter:] == 0).all()

        ni = 0
        i = 0
        j = 0
        jlL_i = []
        for l, n in zip(l_j, n_j):
            for m in range(2 * l + 1):
                jlL_i.append((j, l, l**2 + m))
                i += 1
            j += 1
        ni = i
        self.ni = ni

        _np = ni * (ni + 1) // 2
        self.nq = nq = nj * (nj + 1) // 2

        lcut = max(l_j)
        if 2 * lcut < lmax:
            lcut = (lmax + 1) // 2
        self.lcut = lcut

        self.B_ii = self.calculate_projector_overlaps(pt_jg)

        self.fcorehole = data.fcorehole
        self.lcorehole = data.lcorehole
        if data.phicorehole_g is not None:
            if self.lcorehole == 0:
                self.calculate_oscillator_strengths(phi_jg)
            else:
                self.A_ci = None

        # Construct splines:
        self.vbar = rgd.spline(data.vbar_g, rcutfilter)

        rcore, nc_g, nct_g, nct = self.construct_core_densities(data)
        self.rcore = rcore
        self.nct = nct

        # Construct splines for core kinetic energy density:
        tauct_g = data.tauct_g
        self.tauct = rgd.spline(tauct_g, self.rcore)

        self.pt_j = self.create_projectors(rcutfilter)

        if basis is None:
            basis = self.create_basis_functions(phit_jg, rcut2, gcut2)
        phit_j = basis.tosplines()
        self.phit_j = phit_j
        self.basis = basis

        self.nao = 0
        for phit in self.phit_j:
            l = phit.get_angular_momentum_number()
            self.nao += 2 * l + 1

        rgd2 = self.rgd2 = AERadialGridDescriptor(rgd.a, rgd.b, gcut2)
        r_g = rgd2.r_g
        dr_g = rgd2.dr_g
        phi_jg = np.array([phi_g[:gcut2].copy() for phi_g in phi_jg])
        phit_jg = np.array([phit_g[:gcut2].copy() for phit_g in phit_jg])
        self.nc_g = nc_g = nc_g[:gcut2].copy()
        self.nct_g = nct_g = nct_g[:gcut2].copy()
        vbar_g = data.vbar_g[:gcut2].copy()

        extra_xc_data = dict(data.extra_xc_data)
        # Cut down the GLLB related extra data
        for key, item in extra_xc_data.items():
            if len(item) == rgd.N:
                extra_xc_data[key] = item[:gcut2].copy()
        self.extra_xc_data = extra_xc_data

        self.phicorehole_g = data.phicorehole_g
        if self.phicorehole_g is not None:
            self.phicorehole_g = self.phicorehole_g[:gcut2].copy()

        T_Lqp = self.calculate_T_Lqp(lcut, nq, _np, nj, jlL_i)
        (g_lg, n_qg, nt_qg, Delta_lq, self.Lmax, self.Delta_pL, Delta0,
         self.N0_p) = self.get_compensation_charges(phi_jg, phit_jg, _np,
                                                    T_Lqp)
        self.Delta0 = Delta0
        self.g_lg = g_lg

        # Solves the radial poisson equation for density n_g
        def H(n_g, l):
            return rgd2.poisson(n_g, l) * r_g * dr_g

        wnc_g = H(nc_g, l=0)
        wnct_g = H(nct_g, l=0)

        self.wg_lg = wg_lg = [H(g_lg[l], l) for l in range(lmax + 1)]

        wn_lqg = [np.array([H(n_qg[q], l) for q in range(nq)])
                  for l in range(2 * lcut + 1)]
        wnt_lqg = [np.array([H(nt_qg[q], l) for q in range(nq)])
                   for l in range(2 * lcut + 1)]

        rdr_g = r_g * dr_g
        dv_g = r_g * rdr_g
        A = 0.5 * np.dot(nc_g, wnc_g)
        A -= sqrt(4 * pi) * self.Z * np.dot(rdr_g, nc_g)
        mct_g = nct_g + Delta0 * g_lg[0]
        wmct_g = wnct_g + Delta0 * wg_lg[0]
        A -= 0.5 * np.dot(mct_g, wmct_g)
        self.M = A
        self.MB = -np.dot(dv_g * nct_g, vbar_g)
        
        AB_q = -np.dot(nt_qg, dv_g * vbar_g)
        self.MB_p = np.dot(AB_q, T_Lqp[0])
        
        # Correction for average electrostatic potential:
        #
        #   dEH = dEH0 + dot(D_p, dEH_p)
        #
        self.dEH0 = sqrt(4 * pi) * (wnc_g - wmct_g -
                                    sqrt(4 * pi) * self.Z * r_g * dr_g).sum()
        dEh_q = (wn_lqg[0].sum(1) - wnt_lqg[0].sum(1) -
                 Delta_lq[0] * wg_lg[0].sum())
        self.dEH_p = np.dot(dEh_q, T_Lqp[0]) * sqrt(4 * pi)
        
        M_p, M_pp = self.calculate_coulomb_corrections(lcut, n_qg, wn_lqg,
                                                       lmax, Delta_lq,
                                                       wnt_lqg, g_lg,
                                                       wg_lg, nt_qg,
                                                       _np, T_Lqp, nc_g,
                                                       wnc_g, rdr_g, mct_g,
                                                       wmct_g)
        self.M_p = M_p
        self.M_pp = M_pp

        if xc.type == 'GLLB':
            if 'core_f' in self.extra_xc_data:
                self.wnt_lqg = wnt_lqg
                self.wn_lqg = wn_lqg
                self.fc_j = self.extra_xc_data['core_f']
                self.lc_j = self.extra_xc_data['core_l']
                self.njcore = len(self.lc_j)
                if self.njcore > 0:
                    self.uc_jg = self.extra_xc_data['core_states'].reshape(
                        (self.njcore, -1))
                    self.uc_jg = self.uc_jg[:, :gcut2]
                self.phi_jg = phi_jg
            
        self.Kc = data.e_kinetic_core - data.e_kinetic
        self.M -= data.e_electrostatic
        self.E = data.e_total

        Delta0_ii = unpack(self.Delta_pL[:, 0].copy())
        self.dO_ii = data.get_overlap_correction(Delta0_ii)
        self.dC_ii = self.get_inverse_overlap_coefficients(self.B_ii,
                                                           self.dO_ii)
        
        self.Delta_iiL = np.zeros((ni, ni, self.Lmax))
        for L in range(self.Lmax):
            self.Delta_iiL[:, :, L] = unpack(self.Delta_pL[:, L].copy())

        self.Nct = data.get_smooth_core_density_integral(Delta0)
        self.K_p = data.get_linear_kinetic_correction(T_Lqp[0])
        
        r = 0.02 * rcut2 * np.arange(51, dtype=float)
        alpha = data.rcgauss**-2
        self.ghat_l = data.get_ghat(lmax, alpha, r, rcut2)#;print 'use g_lg!'
        self.rcgauss = data.rcgauss
        
        self.xc_correction = data.get_xc_correction(rgd2, xc, gcut2, lcut)
        self.nabla_iiv = self.get_derivative_integrals(rgd2, phi_jg, phit_jg)
        self.rnabla_iiv = self.get_magnetic_integrals(rgd2, phi_jg, phit_jg)
        self.rxp_iiv = self.get_magnetic_integrals_new(rgd2, phi_jg, phit_jg)
Example #20
File: lfc.py, Project: yihsuanliu/gpaw
        return self.lfs_a[a].ni

    def estimate_memory(self, mem):
        count = 0        
        for spline_j in self.spline_aj:
            for spline in spline_j:
                l = spline.get_angular_momentum_number()
                sidelength = 2 * spline.get_cutoff()
                count += (2 * l + 1) * sidelength**3 / self.gd.dv
        bytes = count * mem.floatsize / self.gd.comm.size
        mem.subnode('Boxes', bytes)
        if self.forces:
            mem.subnode('Derivatives', 3 * bytes)
        mem.subnode('Work', bytes)

if extra_parameters.get('usenewlfc', True):
    LocalizedFunctionsCollection = NewLocalizedFunctionsCollection


def LFC(gd, spline_aj, kpt_comm=None,
        cut=False, dtype=float,
        integral=None, forces=False):
    if isinstance(gd, GridDescriptor):
        return LocalizedFunctionsCollection(gd, spline_aj, kpt_comm,
                                            cut, dtype, integral, forces)
    else:
        return gd.get_lfc(gd, spline_aj)
    
                  
def test():
    from gpaw.grid_descriptor import GridDescriptor
Example #21
File: kspot.py, Project: yihsuanliu/gpaw
        t('Calculated core eigenvalues of atom ' + str(a) + ':' + symbol)
        t('state      eigenvalue         ekin         rmax')
        t('-----------------------------------------------')
        for m, l, f, e, u in zip(atom.n_j, atom.l_j, atom.f_j, atom.e_j,
                                 atom.u_j):
            # Find kinetic energy:
            k = e - np.sum((
                np.where(abs(u) < 1e-160, 0, u)**2 *  #XXXNumeric!
                atom.vr * atom.dr)[1:] / atom.r[1:])

            # Find outermost maximum:
            g = atom.N - 4
            while u[g - 1] >= u[g]:
                g -= 1
            x = atom.r[g - 1:g + 2]
            y = u[g - 1:g + 2]
            A = np.transpose(np.array([x**i for i in range(3)]))
            c, b, a = np.linalg.solve(A, y)
            assert a < 0.0
            rmax = -0.5 * b / a

            s = 'spdf'[l]
            t('%d%s^%-4.1f: %12.6f %12.6f %12.3f' % (m, s, f, e, k, rmax))
        t('-----------------------------------------------')
        t('(units: Bohr and Hartree)')
        return atom.e_j


if not extra_parameters.get('usenewxc'):
    raise "New XC-corrections required. Add --gpaw usenewxc=1 to command line and try again."
Example #22
    def print_chi(self, pd):
        calc = self.calc
        gd = calc.wfs.gd

        if extra_parameters.get('df_dry_run'):
            from gpaw.mpi import DryRunCommunicator
            size = extra_parameters['df_dry_run']
            world = DryRunCommunicator(size)
        else:
            world = self.world

        print('%s' % ctime(), file=self.fd)
        print('Called response.chi0.calculate with', file=self.fd)

        q_c = pd.kd.bzk_kc[0]
        print('    q_c: [%f, %f, %f]' % (q_c[0], q_c[1], q_c[2]), file=self.fd)

        nw = len(self.omega_w)
        print('    Number of frequency points: %d' % nw, file=self.fd)

        ecut = self.ecut * Hartree
        print('    Planewave cutoff: %f' % ecut, file=self.fd)

        ns = calc.wfs.nspins
        print('    Number of spins: %d' % ns, file=self.fd)

        nbands = self.nbands
        print('    Number of bands: %d' % nbands, file=self.fd)

        nk = calc.wfs.kd.nbzkpts
        print('    Number of kpoints: %d' % nk, file=self.fd)

        nik = calc.wfs.kd.nibzkpts
        print('    Number of irreducible kpoints: %d' % nik, file=self.fd)
        
        ngmax = pd.ngmax
        print('    Number of planewaves: %d' % ngmax, file=self.fd)

        eta = self.eta * Hartree
        print('    Broadening (eta): %f' % eta, file=self.fd)
        
        wsize = world.size
        print('    world.size: %d' % wsize, file=self.fd)

        knsize = self.kncomm.size
        print('    kncomm.size: %d' % knsize, file=self.fd)

        bsize = self.blockcomm.size
        print('    blockcomm.size: %d' % bsize, file=self.fd)
        
        nocc = self.nocc1
        print('    Number of completely occupied states: %d'
              % nocc, file=self.fd)
        
        npocc = self.nocc2
        print('    Number of partially occupied states: %d'
              % npocc, file=self.fd)

        keep = self.keep_occupied_states
        print('    Keep occupied states: %s' % keep, file=self.fd)

        print('', file=self.fd)
        print('    Memory estimate of potentially large arrays:', file=self.fd)

        chisize = nw * pd.ngmax**2 * 16. / 1024**2
        print('        chi0_wGG: %f M / cpu' % chisize, file=self.fd)

        ngridpoints = gd.N_c[0] * gd.N_c[1] * gd.N_c[2]

        if self.keep_occupied_states:
            nstat = (ns * nk * npocc + world.size - 1) // world.size
        else:
            nstat = (ns * npocc + world.size - 1) // world.size

        occsize = nstat * ngridpoints * 16. / 1024**2
        print('        Occupied states: %f M / cpu' % occsize,
              file=self.fd)

        print('        Memory usage before allocation: %f M / cpu'
              % (maxrss() / 1024**2), file=self.fd)

        print('', file=self.fd)
Example #23
    def initialize(self, simple_version=False):

        self.printtxt('')
        self.printtxt('-----------------------------------------')
        self.printtxt('Response function calculation started at:')
        self.starttime = time()
        self.printtxt(ctime())

        BASECHI.initialize(self)

        # Frequency init
        self.dw = None
        if len(self.w_w) == 1:
            self.hilbert_trans = False

        if self.hilbert_trans:
            self.dw = self.w_w[1] - self.w_w[0]
#            assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all() # make sure its linear w grid
            assert self.w_w.max() == self.w_w[-1]
            
            self.dw /= Hartree
            self.w_w /= Hartree
            self.wmax = self.w_w[-1] 
            self.wcut = self.wmax + 5. / Hartree
#            self.Nw  = int(self.wmax / self.dw) + 1
            self.Nw = len(self.w_w)
            self.NwS = int(self.wcut / self.dw) + 1
        else:
            self.Nw = len(self.w_w)
            self.NwS = 0
            if len(self.w_w) > 2:
                self.dw = self.w_w[1] - self.w_w[0]
                assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all()
                self.dw /= Hartree

        self.nvalbands = self.nbands
        tmpn = np.zeros(self.nspins, dtype=int)
        for spin in range(self.nspins):
            for n in range(self.nbands):
                if (self.f_skn[spin][:, n] - self.ftol < 0).all():
                    tmpn[spin] = n
                    break
        if tmpn.max() > 0:
            self.nvalbands = tmpn.max()

        # Parallelization initialize
        self.parallel_init()

        # Printing calculation information
        self.print_chi()

        if extra_parameters.get('df_dry_run'):
            raise SystemExit

        calc = self.calc

        # For LCAO wfs
        if calc.input_parameters['mode'] == 'lcao':
            calc.initialize_positions()        
        self.printtxt('     Max mem sofar   : %f M / cpu' %(maxrss() / 1024**2))

        if simple_version is True:
            return
        # PAW part init
        # calculate <phi_i | e**(-i(q+G).r) | phi_j>
        # G != 0 part
        self.phi_aGp, self.phiG0_avp = self.get_phi_aGp(alldir=True)
        self.printtxt('Finished phi_aGp !')
        mem = np.array([self.phi_aGp[i].size * 16 /1024.**2 for i in range(len(self.phi_aGp))])
        self.printtxt('     Phi_aGp         : %f M / cpu' %(mem.sum()))

        # Calculate ALDA kernel (not used in chi0)
        R_av = calc.atoms.positions / Bohr
        if self.xc == 'RPA': #type(self.w_w[0]) is float:
            self.Kc_GG = None
            self.printtxt('RPA calculation.')
        elif self.xc == 'ALDA' or self.xc == 'ALDA_X':
            #self.Kc_GG = calculate_Kc(self.q_c,
            #                          self.Gvec_Gc,
            #                          self.acell_cv,
            #                          self.bcell_cv,
            #                          self.calc.atoms.pbc,
            #                          self.vcut)
            # Initialize a CoulombKernel instance
            kernel = CoulombKernel(vcut=self.vcut,
                                   pbc=self.calc.atoms.pbc,
                                   cell=self.acell_cv)
            self.Kc_GG = kernel.calculate_Kc(self.q_c,
                                             self.Gvec_Gc,
                                             self.bcell_cv)
            
            self.Kxc_sGG = calculate_Kxc(self.gd, # global grid
                                         self.gd.zero_pad(calc.density.nt_sG),
                                         self.npw, self.Gvec_Gc,
                                         self.gd.N_c, self.vol,
                                         self.bcell_cv, R_av,
                                         calc.wfs.setups,
                                         calc.density.D_asp,
                                         functional=self.xc,
                                         density_cut=self.density_cut)
            
            self.printtxt('Finished %s kernel ! ' % self.xc)
                
        return
Example #24
    def initialize(self, simple_version=False):

        self.printtxt("")
        self.printtxt("-----------------------------------------")
        self.printtxt("Response function calculation started at:")
        self.starttime = time()
        self.printtxt(ctime())

        BASECHI.initialize(self)

        # Frequency init
        self.dw = None
        if len(self.w_w) == 1:
            self.hilbert_trans = False

        if self.hilbert_trans:
            self.dw = self.w_w[1] - self.w_w[0]
            #            assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all() # make sure its linear w grid
            assert self.w_w.max() == self.w_w[-1]

            self.dw /= Hartree
            self.w_w /= Hartree
            self.wmax = self.w_w[-1]
            self.wcut = self.wmax + 5.0 / Hartree
            #            self.Nw  = int(self.wmax / self.dw) + 1
            self.Nw = len(self.w_w)
            self.NwS = int(self.wcut / self.dw) + 1
        else:
            self.Nw = len(self.w_w)
            self.NwS = 0
            if len(self.w_w) > 2:
                self.dw = self.w_w[1] - self.w_w[0]
                assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all()
                self.dw /= Hartree

        self.nvalbands = self.nbands
        tmpn = np.zeros(self.nspins, dtype=int)
        for spin in range(self.nspins):
            for n in range(self.nbands):
                if (self.f_skn[spin][:, n] - self.ftol < 0).all():
                    tmpn[spin] = n
                    break
        if tmpn.max() > 0:
            self.nvalbands = tmpn.max()

        # Parallelization initialize
        self.parallel_init()

        # Printing calculation information
        self.print_chi()

        if extra_parameters.get("df_dry_run"):
            raise SystemExit

        calc = self.calc

        # For LCAO wfs
        if calc.input_parameters["mode"] == "lcao":
            calc.initialize_positions()
        self.printtxt("     Max mem sofar   : %f M / cpu" % (maxrss() / 1024 ** 2))

        if simple_version is True:
            return
        # PAW part init
        # calculate <phi_i | e**(-i(q+G).r) | phi_j>
        # G != 0 part
        self.phi_aGp, self.phiG0_avp = self.get_phi_aGp(alldir=True)
        self.printtxt("Finished phi_aGp !")
        mem = np.array([self.phi_aGp[i].size * 16 / 1024.0 ** 2 for i in range(len(self.phi_aGp))])
        self.printtxt("     Phi_aGp         : %f M / cpu" % (mem.sum()))

        # Calculate ALDA kernel (not used in chi0)
        R_av = calc.atoms.positions / Bohr
        if self.xc == "RPA":  # type(self.w_w[0]) is float:
            self.Kc_GG = None
            self.printtxt("RPA calculation.")
        elif self.xc == "ALDA" or self.xc == "ALDA_X":
            # self.Kc_GG = calculate_Kc(self.q_c,
            #                          self.Gvec_Gc,
            #                          self.acell_cv,
            #                          self.bcell_cv,
            #                          self.calc.atoms.pbc,
            #                          self.vcut)
            # Initialize a CoulombKernel instance
            kernel = CoulombKernel(vcut=self.vcut, pbc=self.calc.atoms.pbc, cell=self.acell_cv)
            self.Kc_GG = kernel.calculate_Kc(self.q_c, self.Gvec_Gc, self.bcell_cv)

            self.Kxc_sGG = calculate_Kxc(
                self.gd,  # global grid
                self.gd.zero_pad(calc.density.nt_sG),
                self.npw,
                self.Gvec_Gc,
                self.gd.N_c,
                self.vol,
                self.bcell_cv,
                R_av,
                calc.wfs.setups,
                calc.density.D_asp,
                functional=self.xc,
                density_cut=self.density_cut,
            )

            self.printtxt("Finished %s kernel ! " % self.xc)

        return
Example #25
File: vdw.py, Project: thonmaker/gpaw
    def calculate_6d_integral(self,
                              n_g,
                              q0_g,
                              a2_g=None,
                              e_LDAc_g=None,
                              v_LDAc_g=None,
                              v_g=None,
                              deda2_g=None):
        self.timer.start('VdW-DF integral')
        self.timer.start('splines')
        if self.C_aip is None:
            self.initialize_more_things()
            self.construct_cubic_splines()
            self.construct_fourier_transformed_kernels()
        self.timer.stop('splines')

        gd = self.gd
        N = self.Nalpha

        world = self.world
        vdwcomm = self.vdwcomm

        if self.alphas:
            self.timer.start('hmm1')
            i_g = (np.log(q0_g / self.q_a[1] * (self.lambd - 1) + 1) /
                   log(self.lambd)).astype(int)
            dq0_g = q0_g - self.q_a[i_g]
            self.timer.stop('hmm1')
        else:
            i_g = None
            dq0_g = None

        if self.verbose:
            print('VDW: fft:', end=' ')

        theta_ak = {}
        p_ag = {}
        for a in self.alphas:
            self.timer.start('hmm2')
            C_pg = self.C_aip[a, i_g].transpose((3, 0, 1, 2))
            pa_g = (C_pg[0] + dq0_g * (C_pg[1] + dq0_g *
                                       (C_pg[2] + dq0_g * C_pg[3])))
            self.timer.stop('hmm2')
            del C_pg
            self.timer.start('FFT')
            theta_ak[a] = rfftn(n_g * pa_g, self.shape).copy()
            if extra_parameters.get('vdw0'):
                theta_ak[a][0, 0, 0] = 0.0
            self.timer.stop()

            if not self.energy_only:
                p_ag[a] = pa_g
            del pa_g
            if self.verbose:
                print(a, end=' ')
                sys.stdout.flush()

        if self.energy_only:
            del i_g
            del dq0_g

        if self.verbose:
            print()
            print('VDW: convolution:', end=' ')

        F_ak = {}
        dj_k = self.dj_k
        energy = 0.0
        for a in range(N):
            if vdwcomm is not None:
                vdw_ranka = a * vdwcomm.size // N
                F_k = np.zeros(
                    (self.shape[0], self.shape[1], self.shape[2] // 2 + 1),
                    complex)
            self.timer.start('Convolution')
            for b in self.alphas:
                _gpaw.vdw2(self.phi_aajp[a, b], self.j_k, dj_k, theta_ak[b],
                           F_k)
            self.timer.stop()

            if vdwcomm is not None:
                self.timer.start('gather')
                for F in F_k:
                    vdwcomm.sum(F, vdw_ranka)
                self.timer.stop('gather')

            if vdwcomm is not None and vdwcomm.rank == vdw_ranka:
                if not self.energy_only:
                    F_ak[a] = F_k
                energy += np.vdot(theta_ak[a][:, :, 0], F_k[:, :, 0]).real
                energy += np.vdot(theta_ak[a][:, :, -1], F_k[:, :, -1]).real
                energy += 2 * np.vdot(theta_ak[a][:, :, 1:-1], F_k[:, :,
                                                                   1:-1]).real

            if self.verbose:
                print(a, end=' ')
                sys.stdout.flush()

        del theta_ak

        if self.verbose:
            print()

        if not self.energy_only:
            F_ag = {}
            for a in self.alphas:
                n1, n2, n3 = gd.get_size_of_global_array()
                self.timer.start('iFFT')
                F_ag[a] = irfftn(F_ak[a]).real[:n1, :n2, :n3].copy()
                self.timer.stop()
            del F_ak

            self.timer.start('potential')
            self.calculate_potential(n_g, a2_g, i_g, dq0_g, p_ag, F_ag,
                                     e_LDAc_g, v_LDAc_g, v_g, deda2_g)
            self.timer.stop()

        self.timer.stop()
        return 0.5 * world.sum(energy) * gd.dv / self.shape.prod()
Example #26
File: cg.py, Project: robwarm/gpaw-symm
    def iterate_one_k_point(self, hamiltonian, wfs, kpt):
        """Do conjugate gradient iterations for the k-point"""

        niter = self.niter

        phi_G = wfs.empty(q=kpt.q)
        phi_old_G = wfs.empty(q=kpt.q)

        comm = wfs.gd.comm

        psit_nG, Htpsit_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
        # Note that psit_nG is now in self.operator.work1_nG and
        # Htpsit_nG is in kpt.psit_nG!

        R_nG = reshape(self.Htpsit_nG, psit_nG.shape)
        Htphi_G = R_nG[0]

        R_nG[:] = Htpsit_nG
        self.timer.start('Residuals')
        self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
                                 kpt.P_ani, kpt.eps_n, R_nG)
        self.timer.stop('Residuals')

        self.timer.start('CG')

        total_error = 0.0
        for n in range(self.nbands):
            if extra_parameters.get('PK', False):
               N = n+1
            else:
               N = psit_nG.shape[0]+1
            R_G = R_nG[n]
            Htpsit_G = Htpsit_nG[n]
            gamma_old = 1.0
            phi_old_G[:] = 0.0
            error = np.real(wfs.integrate(R_G, R_G))
            for nit in range(niter):
                if (error * Hartree**2 < self.tolerance / self.nbands):
                    break

                ekin = self.preconditioner.calculate_kinetic_energy(
                    psit_nG[n:n + 1], kpt)

                pR_G = self.preconditioner(R_nG[n:n + 1], kpt, ekin)

                # New search direction
                gamma = comm.sum(np.vdot(pR_G, R_G).real)
                phi_G[:] = -pR_G - gamma / gamma_old * phi_old_G
                gamma_old = gamma
                phi_old_G[:] = phi_G[:]

                # Calculate projections
                P2_ai = wfs.pt.dict()
                wfs.pt.integrate(phi_G, P2_ai, kpt.q)

                # Orthonormalize phi_G to all bands
                self.timer.start('CG: orthonormalize')
                self.timer.start('CG: overlap')
                overlap_n = wfs.integrate(psit_nG[:N], phi_G,
                                        global_integral=False)
                self.timer.stop('CG: overlap')
                self.timer.start('CG: overlap2')
                for a, P2_i in P2_ai.items():
                    P_ni = kpt.P_ani[a]
                    dO_ii = wfs.setups[a].dO_ii
                    gemv(1.0, P_ni[:N].conjugate(), np.inner(dO_ii, P2_i), 
                         1.0, overlap_n)
                self.timer.stop('CG: overlap2')
                comm.sum(overlap_n)

                # phi_G -= overlap_n * kpt.psit_nG
                wfs.matrixoperator.gd.gemv(-1.0, psit_nG[:N], overlap_n, 
                                           1.0, phi_G, 'n')
                for a, P2_i in P2_ai.items():
                    P_ni = kpt.P_ani[a]
                    gemv(-1.0, P_ni[:N], overlap_n, 1.0, P2_i, 'n')

                norm = wfs.integrate(phi_G, phi_G, global_integral=False)
                for a, P2_i in P2_ai.items():
                    dO_ii = wfs.setups[a].dO_ii
                    norm += np.vdot(P2_i, np.inner(dO_ii, P2_i))
                norm = comm.sum(np.real(norm).item())
                phi_G /= sqrt(norm)
                for P2_i in P2_ai.values():
                    P2_i /= sqrt(norm)
                self.timer.stop('CG: orthonormalize')

                # find optimum linear combination of psit_G and phi_G
                an = kpt.eps_n[n]
                wfs.apply_pseudo_hamiltonian(kpt, hamiltonian,
                                             phi_G.reshape((1,) + phi_G.shape),
                                             Htphi_G.reshape((1,) +
                                                             Htphi_G.shape))
                b = wfs.integrate(phi_G, Htpsit_G, global_integral=False)
                c = wfs.integrate(phi_G, Htphi_G, global_integral=False)
                for a, P2_i in P2_ai.items():
                    P_i = kpt.P_ani[a][n]
                    dH_ii = unpack(hamiltonian.dH_asp[a][kpt.s])
                    b += dot(P2_i, dot(dH_ii, P_i.conj()))
                    c += dot(P2_i, dot(dH_ii, P2_i.conj()))
                b = comm.sum(np.real(b).item())
                c = comm.sum(np.real(c).item())

                theta = 0.5 * atan2(2 * b, an - c)
                enew = (an * cos(theta)**2 +
                        c * sin(theta)**2 +
                        b * sin(2.0 * theta))
                # theta can correspond to either a minimum or a maximum
                if (enew - kpt.eps_n[n]) > 0.0:  # we were at maximum
                    theta += pi / 2.0
                    enew = (an * cos(theta)**2 +
                            c * sin(theta)**2 +
                            b * sin(2.0 * theta))

                kpt.eps_n[n] = enew
                psit_nG[n] *= cos(theta)
                # kpt.psit_nG[n] += sin(theta) * phi_G
                axpy(sin(theta), phi_G, psit_nG[n])
                for a, P2_i in P2_ai.items():
                    P_i = kpt.P_ani[a][n]
                    P_i *= cos(theta)
                    P_i += sin(theta) * P2_i

                if nit < niter - 1:
                    Htpsit_G *= cos(theta)
                    # Htpsit_G += sin(theta) * Htphi_G
                    axpy(sin(theta), Htphi_G, Htpsit_G)
                    #adjust residuals
                    R_G[:] = Htpsit_G - kpt.eps_n[n] * psit_nG[n]

                    coef_ai = wfs.pt.dict()
                    for a, coef_i in coef_ai.items():
                        P_i = kpt.P_ani[a][n]
                        dO_ii = wfs.setups[a].dO_ii
                        dH_ii = unpack(hamiltonian.dH_asp[a][kpt.s])
                        coef_i[:] = (dot(P_i, dH_ii) -
                                     dot(P_i * kpt.eps_n[n], dO_ii))
                    wfs.pt.add(R_G, coef_ai, kpt.q)
                    error_new = np.real(wfs.integrate(R_G, R_G))
                    if error_new / error < self.rtol:
                        # print >> self.f, "cg:iters", n, nit+1
                        break
                    if (self.nbands_converge == 'occupied' and
                        kpt.f_n is not None and kpt.f_n[n] == 0.0):
                        # print >> self.f, "cg:iters", n, nit+1
                        break
                    error = error_new

            if kpt.f_n is None:
                weight = 1.0
            else:
                weight = kpt.f_n[n]
            if self.nbands_converge != 'occupied':
                weight = kpt.weight * float(n < self.nbands_converge)
            total_error += weight * error
            # if nit == 3:
            #   print >> self.f, "cg:iters", n, nit+1

        self.timer.stop('CG')
        return total_error, psit_nG
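
Note: the line search above mixes psit_G and phi_G with an angle theta = 0.5 * atan2(2b, an - c), where an = <psi|H|psi>, b = <phi|H|psi> and c = <phi|H|phi>, and shifts theta by pi/2 when the stationary point turned out to be a maximum. A short standalone check (illustrative only, not GPAW code) that the corrected angle reproduces the lower eigenvalue of the corresponding 2x2 problem:

import numpy as np
from math import atan2, cos, sin, pi

rng = np.random.default_rng(42)
for _ in range(100):
    a, b, c = rng.normal(size=3)        # <psi|H|psi>, <phi|H|psi>, <phi|H|phi>
    theta = 0.5 * atan2(2 * b, a - c)
    enew = a * cos(theta)**2 + c * sin(theta)**2 + b * sin(2 * theta)
    if enew - a > 0.0:                  # stationary point was the maximum
        theta += pi / 2
        enew = a * cos(theta)**2 + c * sin(theta)**2 + b * sin(2 * theta)
    emin = np.linalg.eigvalsh([[a, b], [b, c]])[0]
    assert abs(enew - emin) < 1e-10
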
Example #27
0
    def iterate_one_k_point(self, hamiltonian, wfs, kpt):
        """Do a single RMM-DIIS iteration for the kpoint"""

        psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)

        self.timer.start('RMM-DIIS')
        if self.keep_htpsit:
            self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG, kpt.P_ani,
                                     kpt.eps_n, R_nG)

        def integrate(a_G, b_G):
            return np.real(wfs.integrate(a_G, b_G, global_integral=False))

        comm = wfs.gd.comm
        B = self.blocksize
        dR_xG = wfs.empty(B, q=kpt.q)
        P_axi = wfs.pt.dict(B)
        error = 0.0
        for n1 in range(0, wfs.bd.mynbands, B):
            n2 = n1 + B
            if n2 > wfs.bd.mynbands:
                n2 = wfs.bd.mynbands
                B = n2 - n1
                P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
                dR_xG = dR_xG[:B]

            n_x = np.arange(n1, n2)
            psit_xG = psit_nG[n1:n2]

            if self.keep_htpsit:
                R_xG = R_nG[n1:n2]
            else:
                R_xG = wfs.empty(B, q=kpt.q)
                wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
                wfs.pt.integrate(psit_xG, P_axi, kpt.q)
                self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG, P_axi,
                                         kpt.eps_n[n_x], R_xG, n_x)

            for n in n_x:
                if kpt.f_n is None:
                    weight = kpt.weight
                else:
                    weight = kpt.f_n[n]
                if self.nbands_converge != 'occupied':
                    if wfs.bd.global_index(n) < self.nbands_converge:
                        weight = kpt.weight
                    else:
                        weight = 0.0
                error += weight * integrate(R_xG[n - n1], R_xG[n - n1])

            # Precondition the residual:
            self.timer.start('precondition')
            ekin_x = self.preconditioner.calculate_kinetic_energy(psit_xG, kpt)
            dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
            self.timer.stop('precondition')

            # Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
            wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
            self.timer.start('projections')
            wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
            self.timer.stop('projections')
            self.calculate_residuals(kpt,
                                     wfs,
                                     hamiltonian,
                                     dpsit_xG,
                                     P_axi,
                                     kpt.eps_n[n_x],
                                     dR_xG,
                                     n_x,
                                     calculate_change=True)

            # Find lam that minimizes the norm of R'_G = R_G + lam dR_G
            RdR_x = np.array(
                [integrate(dR_G, R_G) for R_G, dR_G in zip(R_xG, dR_xG)])
            dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
            comm.sum(RdR_x)
            comm.sum(dRdR_x)

            lam_x = -RdR_x / dRdR_x
            if extra_parameters.get('PK', False):
                lam_x[:] = np.where(lam_x > 0.0, lam_x, 0.2)
            # Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
            #                      = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
            for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
                if self.fixed_trial_step is None:
                    lam2 = lam
                else:
                    lam2 = self.fixed_trial_step
                R_G *= lam + lam2
                axpy(lam * lam2, dR_G, R_G)

            self.timer.start('precondition')
            psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
            self.timer.stop('precondition')

        self.timer.stop('RMM-DIIS')
        error = comm.sum(error)
        return error, psit_nG
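
Note: the step length used above, lam = -<R|dR>/<dR|dR>, is exactly the value that minimizes the norm of the updated residual R + lam*dR. A tiny standalone numpy sketch (illustrative only, not GPAW code):

import numpy as np

rng = np.random.default_rng(0)
R = rng.normal(size=1000)               # residual
dR = rng.normal(size=1000)              # its change under the preconditioned step

lam = -np.vdot(R, dR) / np.vdot(dR, dR)
best = np.linalg.norm(R + lam * dR)

# Any other step length gives a residual norm at least as large:
for trial in np.linspace(lam - 1.0, lam + 1.0, 11):
    assert np.linalg.norm(R + trial * dR) >= best - 1e-12
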
Example #28
0
File: setup.py  Project: eojons/gpaw-scme
    def __init__(self, data, xc, lmax=0, basis=None, filter=None):
        self.type = data.name
        
        self.HubU = None

        if max(data.l_j) > 2 and not extra_parameters.get('fprojectors'):
            msg = ('Your %s dataset has f-projectors!  ' % data.symbol +
                   'Add --gpaw=fprojectors=1 on the command-line.')
            raise RuntimeError(msg)
            
        if not data.is_compatible(xc):
            raise ValueError('Cannot use %s setup with %s functional' %
                             (data.setupname, xc.get_setup_name()))
        
        self.symbol = symbol = data.symbol
        self.data = data

        self.Nc = data.Nc
        self.Nv = data.Nv
        self.Z = data.Z
        l_j = self.l_j = data.l_j
        n_j = self.n_j = data.n_j
        self.f_j = data.f_j
        self.eps_j = data.eps_j
        nj = self.nj = len(l_j)
        rcut_j = self.rcut_j = data.rcut_j

        self.ExxC = data.ExxC
        self.X_p = data.X_p

        pt_jg = data.pt_jg
        phit_jg = data.phit_jg
        phi_jg = data.phi_jg

        self.fingerprint = data.fingerprint
        self.filename = data.filename

        rgd = self.rgd = data.rgd
        r_g = rgd.r_g
        dr_g = rgd.dr_g

        self.lmax = lmax
            
        rcutmax = max(rcut_j)
        rcut2 = 2 * rcutmax
        gcut2 = rgd.ceil(rcut2)
        self.gcut2 = gcut2

        self.gcutmin = rgd.ceil(min(rcut_j))

        if data.generator_version < 2:
            # Find Fourier-filter cutoff radius:
            gcutfilter = data.get_max_projector_cutoff()
        elif filter:
            rc = rcutmax
            filter(rgd, rc, data.vbar_g)

            for l, pt_g in zip(l_j, pt_jg):
                filter(rgd, rc, pt_g, l)

            for l in range(max(l_j) + 1):
                J = [j for j, lj in enumerate(l_j) if lj == l]
                A_nn = [[rgd.integrate(phit_jg[j1] * pt_jg[j2]) / 4 / pi
                         for j1 in J] for j2 in J]
                B_nn = np.linalg.inv(A_nn)
                pt_ng = np.dot(B_nn, [pt_jg[j] for j in J])
                for n, j in enumerate(J):
                    pt_jg[j] = pt_ng[n]
            gcutfilter = data.get_max_projector_cutoff()
        else:
            rcutfilter = max(rcut_j)
            gcutfilter = rgd.ceil(rcutfilter)
        
        self.rcutfilter = rcutfilter = r_g[gcutfilter]
        assert (data.vbar_g[gcutfilter:] == 0).all()

        ni = 0
        i = 0
        j = 0
        jlL_i = []
        for l, n in zip(l_j, n_j):
            for m in range(2 * l + 1):
                jlL_i.append((j, l, l**2 + m))
                i += 1
            j += 1
        ni = i
        self.ni = ni

        _np = ni * (ni + 1) // 2
        self.nq = nq = nj * (nj + 1) // 2

        lcut = max(l_j)
        if 2 * lcut < lmax:
            lcut = (lmax + 1) // 2
        self.lcut = lcut

        self.B_ii = self.calculate_projector_overlaps(pt_jg)

        self.fcorehole = data.fcorehole
        self.lcorehole = data.lcorehole
        if data.phicorehole_g is not None:
            if self.lcorehole == 0:
                self.calculate_oscillator_strengths(phi_jg)
            else:
                self.A_ci = None

        # Construct splines:
        self.vbar = rgd.spline(data.vbar_g, rcutfilter)

        rcore, nc_g, nct_g, nct = self.construct_core_densities(data)
        self.rcore = rcore
        self.nct = nct

        # Construct splines for core kinetic energy density:
        tauct_g = data.tauct_g
        if tauct_g is None:
            tauct_g = np.zeros(rgd.N)  # radial grid length (the original `ng` was undefined here)
        self.tauct = rgd.spline(tauct_g, self.rcore)

        self.pt_j = self.create_projectors(rcutfilter)

        if basis is None:
            basis = self.create_basis_functions(phit_jg, rcut2, gcut2)
        phit_j = basis.tosplines()
        self.phit_j = phit_j
        self.basis = basis

        self.nao = 0
        for phit in self.phit_j:
            l = phit.get_angular_momentum_number()
            self.nao += 2 * l + 1

        rgd2 = self.rgd2 = AERadialGridDescriptor(rgd.a, rgd.b, gcut2)
        r_g = rgd2.r_g
        dr_g = rgd2.dr_g
        phi_jg = np.array([phi_g[:gcut2].copy() for phi_g in phi_jg])
        phit_jg = np.array([phit_g[:gcut2].copy() for phit_g in phit_jg])
        self.nc_g = nc_g = nc_g[:gcut2].copy()
        self.nct_g = nct_g = nct_g[:gcut2].copy()
        vbar_g = data.vbar_g[:gcut2].copy()
        tauc_g = data.tauc_g[:gcut2].copy()

        extra_xc_data = dict(data.extra_xc_data)
        # Cut down the GLLB related extra data
        for key, item in extra_xc_data.items():
            if len(item) == rgd.N:
                extra_xc_data[key] = item[:gcut2].copy()
        self.extra_xc_data = extra_xc_data

        self.phicorehole_g = data.phicorehole_g
        if self.phicorehole_g is not None:
            self.phicorehole_g = self.phicorehole_g[:gcut2].copy()

        T_Lqp = self.calculate_T_Lqp(lcut, nq, _np, nj, jlL_i)
        (g_lg, n_qg, nt_qg, Delta_lq, self.Lmax, self.Delta_pL, Delta0, 
         self.N0_p) = self.get_compensation_charges(phi_jg, phit_jg, _np,
                                                    T_Lqp)
        self.Delta0 = Delta0
        self.g_lg = g_lg

        # Solves the radial poisson equation for density n_g
        def H(n_g, l):
            return rgd2.poisson(n_g, l) * r_g * dr_g

        wnc_g = H(nc_g, l=0)
        wnct_g = H(nct_g, l=0)

        self.wg_lg = wg_lg = [H(g_lg[l], l) for l in range(lmax + 1)]

        wn_lqg = [np.array([H(n_qg[q], l) for q in range(nq)])
                  for l in range(2 * lcut + 1)]
        wnt_lqg = [np.array([H(nt_qg[q], l) for q in range(nq)])
                   for l in range(2 * lcut + 1)]

        rdr_g = r_g * dr_g
        dv_g = r_g * rdr_g
        A = 0.5 * np.dot(nc_g, wnc_g)
        A -= sqrt(4 * pi) * self.Z * np.dot(rdr_g, nc_g)
        mct_g = nct_g + Delta0 * g_lg[0]
        wmct_g = wnct_g + Delta0 * wg_lg[0]
        A -= 0.5 * np.dot(mct_g, wmct_g)
        self.M = A
        self.MB = -np.dot(dv_g * nct_g, vbar_g)
        
        AB_q = -np.dot(nt_qg, dv_g * vbar_g)
        self.MB_p = np.dot(AB_q, T_Lqp[0])
        
        # Correction for average electrostatic potential:
        #
        #   dEH = dEH0 + dot(D_p, dEH_p)
        #
        self.dEH0 = sqrt(4 * pi) * (wnc_g - wmct_g -
                                    sqrt(4 * pi) * self.Z * r_g * dr_g).sum()
        dEh_q = (wn_lqg[0].sum(1) - wnt_lqg[0].sum(1) -
                 Delta_lq[0] * wg_lg[0].sum())
        self.dEH_p = np.dot(dEh_q, T_Lqp[0]) * sqrt(4 * pi)
        
        M_p, M_pp = self.calculate_coulomb_corrections(lcut, n_qg, wn_lqg,
                                                       lmax, Delta_lq,
                                                       wnt_lqg, g_lg, 
                                                       wg_lg, nt_qg,
                                                       _np, T_Lqp, nc_g,
                                                       wnc_g, rdr_g, mct_g,
                                                       wmct_g)
        self.M_p = M_p
        self.M_pp = M_pp

        if xc.type == 'GLLB':
            if 'core_f' in self.extra_xc_data:
                self.wnt_lqg = wnt_lqg
                self.wn_lqg = wn_lqg
                self.fc_j = self.extra_xc_data['core_f']
                self.lc_j = self.extra_xc_data['core_l']
                self.njcore = len(self.lc_j)
                if self.njcore > 0:
                    self.uc_jg = self.extra_xc_data['core_states'].reshape(
                        (self.njcore, -1))
                    self.uc_jg = self.uc_jg[:, :gcut2]
                self.phi_jg = phi_jg
            
        self.Kc = data.e_kinetic_core - data.e_kinetic
        self.M -= data.e_electrostatic
        self.E = data.e_total

        Delta0_ii = unpack(self.Delta_pL[:, 0].copy())
        self.dO_ii = data.get_overlap_correction(Delta0_ii)
        self.dC_ii = self.get_inverse_overlap_coefficients(self.B_ii,
                                                           self.dO_ii)
        
        self.Delta_iiL = np.zeros((ni, ni, self.Lmax))
        for L in range(self.Lmax):
            self.Delta_iiL[:, :, L] = unpack(self.Delta_pL[:, L].copy())

        self.Nct = data.get_smooth_core_density_integral(Delta0)
        self.K_p = data.get_linear_kinetic_correction(T_Lqp[0])
        
        r = 0.02 * rcut2 * np.arange(51, dtype=float)
        alpha = data.rcgauss**-2
        self.ghat_l = data.get_ghat(lmax, alpha, r, rcut2)
        self.rcgauss = data.rcgauss
        
        self.xc_correction = data.get_xc_correction(rgd2, xc, gcut2, lcut)
        self.nabla_iiv = self.get_derivative_integrals(rgd2, phi_jg, phit_jg)
        self.rnabla_iiv = self.get_magnetic_integrals(rgd2, phi_jg, phit_jg)
        self.rxp_iiv = self.get_magnetic_integrals_new(rgd2, phi_jg, phit_jg)
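
Note: the sizes _np = ni * (ni + 1) // 2 and nq = nj * (nj + 1) // 2 above are the lengths of upper-triangle packed storage for symmetric ni x ni and nj x nj matrices; packed quantities such as Delta_pL are expanded back to full matrices with `unpack`. The generic pack/unpack pair below only illustrates that storage convention; it is not the GPAW `unpack` helper and does not reproduce its exact weighting of off-diagonal elements:

import numpy as np

def pack_sym(M_ii):
    """Pack the upper triangle of a symmetric matrix, row by row."""
    ni = len(M_ii)
    return np.array([M_ii[i, j] for i in range(ni) for j in range(i, ni)])

def unpack_sym(M_p):
    """Rebuild the full symmetric matrix from packed storage."""
    ni = int((np.sqrt(8 * len(M_p) + 1) - 1) // 2)
    M_ii = np.zeros((ni, ni))
    p = 0
    for i in range(ni):
        for j in range(i, ni):
            M_ii[i, j] = M_ii[j, i] = M_p[p]
            p += 1
    return M_ii

M = np.arange(16.0).reshape(4, 4)
M = 0.5 * (M + M.T)                     # symmetrize
assert len(pack_sym(M)) == 4 * (4 + 1) // 2
assert np.allclose(unpack_sym(pack_sym(M)), M)
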
Example #29
0
File: setup.py  Project: eojons/gpaw-scme
    def get_magnetic_integrals_new(self, rgd, phi_jg, phit_jg):
        """Calculate PAW-correction matrix elements of r x nabla.

        ::
        
          /  _       _          _     ~   _      ~   _
          | dr [phi (r) O  phi (r) - phi (r) O  phi (r)]
          /        1     x    2         1     x    2

                       d      d
          where O  = y -- - z --
                 x     dz     dy

        and similar for y and z."""
        
        if extra_parameters.get('fprojectors'):
            return None

        # utility functions

        # from Y_L to Y_lm where Y_lm is a spherical harmonic and m= -l, ..., +l
        def YL_to_Ylm(L):
            # (c,l,m)
            if L == 0:
                return [(1.0, 0, 0)]
            if L == 1: # y
                return [ ( 1j/sqrt(2.), 1, -1),
                         ( 1j/sqrt(2.), 1,  1) ]
            if L == 2: # z
                return [(1.0, 1, 0)]
            if L == 3: # x
                return [ (  1/np.sqrt(2.), 1, -1),
                         ( -1/np.sqrt(2.), 1,  1) ]
            if L == 4: # xy
                return [ ( 1j/np.sqrt(2.), 2, -2),
                         (-1j/np.sqrt(2.), 2,  2) ]
            if L == 5: # yz
                return [ ( 1j/np.sqrt(2.), 2, -1),
                         ( 1j/np.sqrt(2.), 2,  1) ]
            if L == 6: # 3z2-r2
                return [(1.0, 2, 0)]
            if L == 7: # zx
                return [ ( 1/np.sqrt(2.), 2, -1),
                         (-1/np.sqrt(2.), 2,  1) ]
            if L == 8: # x2-y2
                return [ ( 1/np.sqrt(2.), 2, -2),
                         ( 1/np.sqrt(2.), 2,  2) ]

            raise RuntimeError('Error in get_magnetic_integrals_new: YL_to_Ylm not implemented for l>2 yet.')

        # <YL1| Lz |YL2>
        # with help of YL_to_Ylm 
        # Lz |lm> = hbar m |lm>
        def YL1_Lz_YL2(L1,L2):
            Yl1m1 = YL_to_Ylm(L1)
            Yl2m2 = YL_to_Ylm(L2)

            sum = 0.j
            for (c1,l1,m1) in Yl1m1:
                for (c2,l2,m2) in Yl2m2:
                    lz = m2
                    if l1 == l2 and m1 == m2:
                        sum += lz * np.conjugate(c1) * c2

            return sum

        # <YL1| L+ |YL2>
        # with help of YL_to_Ylm 
        # and using L+ |lm> = hbar sqrt( l(l+1) - m(m+1) ) |lm+1>
        def YL1_Lp_YL2(L1,L2):
            Yl1m1 = YL_to_Ylm(L1)
            Yl2m2 = YL_to_Ylm(L2)

            sum = 0.j
            for (c1,l1,m1) in Yl1m1:
                for (c2,l2,m2) in Yl2m2:
                    lp = sqrt(l2*(l2+1) - m2*(m2+1))
                    if abs(lp) < 1e-5: continue
                    if l1 == l2 and m1 == m2+1:
                        sum += lp * np.conjugate(c1) * c2

            return sum

        # <YL1| L- |YL2>
        # with help of YL_to_Ylm 
        # and using L- |lm> = hbar sqrt( l(l+1) - m(m-1) ) |lm-1>
        def YL1_Lm_YL2(L1,L2):
            Yl1m1 = YL_to_Ylm(L1)
            Yl2m2 = YL_to_Ylm(L2)

            sum = 0.j
            for (c1,l1,m1) in Yl1m1:
                for (c2,l2,m2) in Yl2m2:
                    lp = sqrt(l2*(l2+1) - m2*(m2-1))
                    if abs(lp) < 1e-5: continue
                    if l1 == l2 and m1 == m2-1:
                        sum += lp * np.conjugate(c1) * c2

            return sum

        # <YL1| Lx |YL2>
        # using Lx = (L+ + L-)/2
        def YL1_Lx_YL2(L1,L2):
            return .5 * ( YL1_Lp_YL2(L1,L2) + YL1_Lm_YL2(L1,L2) )

        # <YL1| Lx |YL2>
        # using Ly = -i(L+ - L-)/2
        def YL1_Ly_YL2(L1,L2):
            return -.5j * ( YL1_Lp_YL2(L1,L2) - YL1_Lm_YL2(L1,L2) )


        # r x p for [i-index 1, i-index 2, (x,y,z)]
        rxp_iiv = np.zeros((self.ni, self.ni, 3))

        # loops over all j1=(l1,m1) values
        i1 = 0
        for j1, l1 in enumerate(self.l_j):
            for m1 in range(2 * l1 + 1):
                L1 = l1**2 + m1
                # loops over all j2=(l2,m2) values
                i2 = 0
                for j2, l2 in enumerate(self.l_j):
                    # radial part, which is common for same j values
                    # int_0^infty phi_l1,m1,g(r) phi_l2,m2,g(r) * 4*pi*r**2 dr
                    # 4 pi here?????
                    radial_part = rgd.integrate(phi_jg[j1] * phi_jg[j2] -
                                                phit_jg[j1] * phit_jg[j2]) / (4*pi)
                    for m2 in range(2 * l2 + 1):
                        L2 = l2**2 + m2
                        # Lx
                        Lx = (1j * YL1_Lx_YL2(L1,L2))
                        #print '%8.3lf %8.3lf | ' % (Lx.real, Lx.imag),
                        rxp_iiv[i1,i2,0] = Lx.real * radial_part

                        # Ly
                        Ly = (1j * YL1_Ly_YL2(L1,L2))
                        #print '%8.3lf %8.3lf | ' % (Ly.real, Ly.imag),
                        rxp_iiv[i1,i2,1] = Ly.real * radial_part
                        # Lz
                        Lz = (1j * YL1_Lz_YL2(L1,L2))
                        #print '%8.3lf %8.3lf | ' % (Lz.real, Lz.imag),
                        rxp_iiv[i1,i2,2] = Lz.real * radial_part

                        #print

                        # increase index 2
                        i2 += 1

                # increase index 1
                i1 += 1

        return rxp_iiv
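
Note: the real p orbitals expand into complex harmonics exactly as in YL_to_Ylm above (L = 1, 2, 3 are y, z, x). A standalone numerical check (illustrative only) that the resulting Lz matrix elements are what the ladder-operator code should produce, namely <p_y|Lz|p_x> = i, its conjugate for the transpose, and zero everywhere else in the p shell:

import numpy as np

s = 1 / np.sqrt(2.0)
# expansion coefficients over (m = -1, 0, +1), following YL_to_Ylm
p_y = np.array([1j * s, 0.0, 1j * s])
p_z = np.array([0.0, 1.0, 0.0])
p_x = np.array([s, 0.0, -s])
m = np.array([-1.0, 0.0, 1.0])

def lz(bra, ket):
    return np.vdot(bra, m * ket)        # <bra| Lz |ket> in units of hbar

assert np.isclose(lz(p_y, p_x), 1j)
assert np.isclose(lz(p_x, p_y), -1j)
assert np.isclose(lz(p_z, p_x), 0.0) and np.isclose(lz(p_y, p_z), 0.0)
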
Example #30
0
    def iterate_one_k_point(self, ham, wfs, kpt):
        """Do conjugate gradient iterations for the k-point"""
        self.timer.start('CG')

        niter = self.niter

        phi_G, phi_old_G, Htphi_G = wfs.empty(3, q=kpt.q)

        comm = wfs.gd.comm
        if self.tw_coeff:
            # Wait!  What business does the eigensolver have changing
            # the properties of the Hamiltonian?  We are not updating
            # the Hamiltonian here.  Moreover, what is supposed to
            # happen if this function is called multiple times per
            # iteration?  Then we keep dividing the potential by the
            # same number.  What on earth is the meaning of this?
            #
            # Also the parameter tw_coeff is undocumented.  What is it?
            ham.vt_sG /= self.tw_coeff
            # Assuming the ordering in dH_asp and wfs is the same
            for a in ham.dH_asp.keys():
                ham.dH_asp[a] /= self.tw_coeff

        psit = kpt.psit
        R = psit.new(buf=wfs.work_array)
        P = kpt.projections
        P2 = P.new()

        self.subspace_diagonalize(ham, wfs, kpt)

        Htpsit = psit.new(buf=self.Htpsit_nG)

        R.array[:] = Htpsit.array
        self.calculate_residuals(kpt, wfs, ham, psit,
                                 P, kpt.eps_n, R, P2)

        total_error = 0.0
        for n in range(self.nbands):
            if extra_parameters.get('PK', False):
                N = n + 1
            else:
                N = self.nbands
            R_G = R.array[n]
            Htpsit_G = Htpsit.array[n]
            psit_G = psit.array[n]
            gamma_old = 1.0
            phi_old_G[:] = 0.0
            error = np.real(wfs.integrate(R_G, R_G))
            for nit in range(niter):
                if (error * Hartree**2 < self.tolerance / self.nbands):
                    break

                ekin = self.preconditioner.calculate_kinetic_energy(psit_G,
                                                                    kpt)

                pR_G = self.preconditioner(R_G, kpt, ekin)

                # New search direction
                gamma = comm.sum(np.vdot(pR_G, R_G).real)
                phi_G[:] = -pR_G - gamma / gamma_old * phi_old_G
                gamma_old = gamma
                phi_old_G[:] = phi_G[:]

                # Calculate projections
                P2_ai = wfs.pt.dict()
                wfs.pt.integrate(phi_G, P2_ai, kpt.q)

                # Orthonormalize phi_G to all bands
                self.timer.start('CG: orthonormalize')
                self.timer.start('CG: overlap')
                overlap_n = wfs.integrate(psit.array[:N], phi_G,
                                          global_integral=False)
                self.timer.stop('CG: overlap')
                self.timer.start('CG: overlap2')
                for a, P2_i in P2_ai.items():
                    P_ni = kpt.P_ani[a]
                    dO_ii = wfs.setups[a].dO_ii
                    overlap_n += np.dot(P_ni[:N].conjugate(),
                                        np.dot(dO_ii, P2_i))
                self.timer.stop('CG: overlap2')
                comm.sum(overlap_n)

                gemv(-1.0, psit.array[:N].view(wfs.dtype), overlap_n,
                     1.0, phi_G.view(wfs.dtype), 'n')

                for a, P2_i in P2_ai.items():
                    P_ni = kpt.P_ani[a]
                    P2_i -= np.dot(overlap_n, P_ni[:N])

                norm = wfs.integrate(phi_G, phi_G, global_integral=False)
                for a, P2_i in P2_ai.items():
                    dO_ii = wfs.setups[a].dO_ii
                    norm += np.vdot(P2_i, np.dot(dO_ii, P2_i))
                norm = comm.sum(float(np.real(norm)))
                phi_G /= sqrt(norm)
                for P2_i in P2_ai.values():
                    P2_i /= sqrt(norm)
                self.timer.stop('CG: orthonormalize')

                # find optimum linear combination of psit_G and phi_G
                an = kpt.eps_n[n]
                wfs.apply_pseudo_hamiltonian(kpt, ham,
                                             phi_G.reshape((1,) + phi_G.shape),
                                             Htphi_G.reshape((1,) +
                                                             Htphi_G.shape))
                b = wfs.integrate(phi_G, Htpsit_G, global_integral=False)
                c = wfs.integrate(phi_G, Htphi_G, global_integral=False)
                for a, P2_i in P2_ai.items():
                    P_i = kpt.P_ani[a][n]
                    dH_ii = unpack(ham.dH_asp[a][kpt.s])
                    b += dot(P2_i, dot(dH_ii, P_i.conj()))
                    c += dot(P2_i, dot(dH_ii, P2_i.conj()))
                b = comm.sum(float(np.real(b)))
                c = comm.sum(float(np.real(c)))

                theta = 0.5 * atan2(2 * b, an - c)
                enew = (an * cos(theta)**2 +
                        c * sin(theta)**2 +
                        b * sin(2.0 * theta))
                # theta can correspond to either a minimum or a maximum
                if (enew - kpt.eps_n[n]) > 0.0:  # we were at maximum
                    theta += pi / 2.0
                    enew = (an * cos(theta)**2 +
                            c * sin(theta)**2 +
                            b * sin(2.0 * theta))

                kpt.eps_n[n] = enew
                psit_G *= cos(theta)
                # kpt.psit_nG[n] += sin(theta) * phi_G
                axpy(sin(theta), phi_G, psit_G)
                for a, P2_i in P2_ai.items():
                    P_i = kpt.P_ani[a][n]
                    P_i *= cos(theta)
                    P_i += sin(theta) * P2_i

                if nit < niter - 1:
                    Htpsit_G *= cos(theta)
                    # Htpsit_G += sin(theta) * Htphi_G
                    axpy(sin(theta), Htphi_G, Htpsit_G)
                    # adjust residuals
                    R_G[:] = Htpsit_G - kpt.eps_n[n] * psit_G

                    coef_ai = wfs.pt.dict()
                    for a, coef_i in coef_ai.items():
                        P_i = kpt.P_ani[a][n]
                        dO_ii = wfs.setups[a].dO_ii
                        dH_ii = unpack(ham.dH_asp[a][kpt.s])
                        coef_i[:] = (dot(P_i, dH_ii) -
                                     dot(P_i * kpt.eps_n[n], dO_ii))
                    wfs.pt.add(R_G, coef_ai, kpt.q)
                    error_new = np.real(wfs.integrate(R_G, R_G))
                    if error_new / error < self.rtol:
                        # print >> self.f, "cg:iters", n, nit+1
                        break
                    if (self.nbands_converge == 'occupied' and
                        kpt.f_n is not None and kpt.f_n[n] == 0.0):
                        # print >> self.f, "cg:iters", n, nit+1
                        break
                    error = error_new

            if kpt.f_n is None:
                weight = 1.0
            else:
                weight = kpt.f_n[n]
            if self.nbands_converge != 'occupied':
                weight = kpt.weight * float(n < self.nbands_converge)
            total_error += weight * error
            # if nit == 3:
            #   print >> self.f, "cg:iters", n, nit+1
        if self.tw_coeff:  # undo the scaling for calculating energies
            for i in range(len(kpt.eps_n)):
                kpt.eps_n[i] *= self.tw_coeff
            ham.vt_sG *= self.tw_coeff
            # Assuming the ordering in dH_asp and wfs is the same
            for a in ham.dH_asp.keys():
                ham.dH_asp[a] *= self.tw_coeff

        self.timer.stop('CG')
        return total_error
Example #31
0
    def calculate(self, q_c, spin='all', A_x=None):
        """Calculate response function.

        Parameters
        ----------
        q_c : list or ndarray
            Momentum vector.
        spin : str or int
            If 'all' then include all spins.
            If 0 or 1, only include this specific spin.
            (not used in transverse response functions)
        A_x : ndarray
            Output array. If None, the output array is created.

        Returns
        -------
        pd : Planewave descriptor
            Planewave descriptor for q_c.
        chi0_wGG : ndarray
            The response function.
        chi0_wxvG : ndarray or None
            (Only in optical limit) Wings of the density response function.
        chi0_wvv : ndarray or None
            (Only in optical limit) Head of the density response function.

        """
        wfs = self.calc.wfs

        if self.response == 'density':
            if spin == 'all':
                spins = range(wfs.nspins)
            else:
                assert spin in range(wfs.nspins)
                spins = [spin]
        else:
            if self.response == '+-':
                spins = [0]
            elif self.response == '-+':
                spins = [1]
            else:
                raise ValueError('Invalid response %s' % self.response)

        q_c = np.asarray(q_c, dtype=float)
        optical_limit = np.allclose(q_c, 0.0) and self.response == 'density'

        pd = self.get_PWDescriptor(q_c, self.gammacentered)

        self.print_chi(pd)

        if extra_parameters.get('df_dry_run'):
            print('    Dry run exit', file=self.fd)
            raise SystemExit

        nG = pd.ngmax + 2 * optical_limit
        nw = len(self.omega_w)
        mynG = (nG + self.blockcomm.size - 1) // self.blockcomm.size
        self.Ga = min(self.blockcomm.rank * mynG, nG)
        self.Gb = min(self.Ga + mynG, nG)
        # if self.blockcomm.rank == 0:
        #     assert self.Gb - self.Ga >= 3
        # assert mynG * (self.blockcomm.size - 1) < nG
        if A_x is not None:
            nx = nw * (self.Gb - self.Ga) * nG
            chi0_wGG = A_x[:nx].reshape((nw, self.Gb - self.Ga, nG))
            chi0_wGG[:] = 0.0
        else:
            chi0_wGG = np.zeros((nw, self.Gb - self.Ga, nG), complex)

        if optical_limit:
            chi0_wxvG = np.zeros((len(self.omega_w), 2, 3, nG), complex)
            chi0_wvv = np.zeros((len(self.omega_w), 3, 3), complex)
            self.plasmafreq_vv = np.zeros((3, 3), complex)
        else:
            chi0_wxvG = None
            chi0_wvv = None
            self.plasmafreq_vv = None

        if self.response == 'density':
            # Do all empty bands:
            m1 = self.nocc1
        else:
            # Do all bands
            m1 = 0

        m2 = self.nbands

        pd, chi0_wGG, chi0_wxvG, chi0_wvv = self._calculate(
            pd, chi0_wGG, chi0_wxvG, chi0_wvv, m1, m2, spins)

        return pd, chi0_wGG, chi0_wxvG, chi0_wvv
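
Note: the plane-wave distribution above is a plain ceiling division over blockcomm: each rank owns the half-open slice [Ga, Gb), and together the slices tile 0..nG-1 with no gaps or overlap (trailing ranks may be empty). A small standalone sketch (not GPAW code):

nG, size = 83, 8                        # illustrative numbers
mynG = (nG + size - 1) // size          # ceiling division

slices = []
for rank in range(size):
    Ga = min(rank * mynG, nG)
    Gb = min(Ga + mynG, nG)
    slices.append((Ga, Gb))

covered = [G for Ga, Gb in slices for G in range(Ga, Gb)]
assert covered == list(range(nG))       # no gaps, no overlap
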
Example #32
0
    def parallel_init(self):
        """Parallel initialization. By default, only use kcomm and wcomm.

        Parameters:

            kcomm:
                 kpoint communicator
            wScomm:
                 spectral function communicator
            wcomm:
                 frequency communicator
        """

        if extra_parameters.get('df_dry_run'):
            from gpaw.mpi import DryRunCommunicator
            size = extra_parameters['df_dry_run']
            world = DryRunCommunicator(size)
            rank = world.rank
            self.comm = world
        else:
            world = self.comm
            rank = self.comm.rank
            size = self.comm.size

        wcommsize = int(self.NwS * self.npw**2 * 16. / 1024**2) // 1500 # megabyte
        wcommsize += 1
        if size < wcommsize:
            raise ValueError('Number of CPUs is not enough!')
        if self.kcommsize is None:
            self.kcommsize = world.size
        if wcommsize > size // self.kcommsize: # if matrix too large, overwrite kcommsize and distribute matrix
            self.printtxt('kcommsize is overwritten!')
            while size % wcommsize != 0:
                wcommsize += 1
            self.kcommsize = size // wcommsize
            assert self.kcommsize * wcommsize == size
            if self.kcommsize < 1:
                raise ValueError('Number of CPUs is not enough!')

        self.kcomm, self.wScomm, self.wcomm = set_communicator(world, rank, size, self.kcommsize)

        if self.kd.nbzkpts >= world.size:
            self.nkpt_reshape = self.kd.nbzkpts
            self.nkpt_reshape, self.nkpt_local, self.kstart, self.kend = parallel_partition(
                               self.nkpt_reshape, self.kcomm.rank, self.kcomm.size, reshape=True, positive=True)
            self.mband_local = self.nvalbands
            self.mlist = np.arange(self.nbands)
        else:
            # fewer k-points than ranks: use band parallelization
            self.nkpt_local = self.kd.nbzkpts
            self.kstart = 0
            self.kend = self.kd.nbzkpts
            self.nkpt_reshape = self.kd.nbzkpts

            self.nbands, self.mband_local, self.mlist = parallel_partition_list(
                               self.nbands, self.kcomm.rank, self.kcomm.size)

        if self.NwS % size != 0:
            self.NwS -= self.NwS % size
            
        self.NwS, self.NwS_local, self.wS1, self.wS2 = parallel_partition(
                               self.NwS, self.wScomm.rank, self.wScomm.size, reshape=False)

        if self.hilbert_trans:
            self.Nw, self.Nw_local, self.wstart, self.wend =  parallel_partition(
                               self.Nw, self.wcomm.rank, self.wcomm.size, reshape=True)
        else:
            if self.Nw > 1:
#                assert self.Nw % (self.comm.size / self.kcomm.size) == 0
                self.wcomm = self.wScomm
                self.Nw, self.Nw_local, self.wstart, self.wend =  parallel_partition(
                               self.Nw, self.wcomm.rank, self.wcomm.size, reshape=False)
            else:
                # too few frequency points: don't parallelize
                self.wcomm = serial_comm
                self.wstart = 0
                self.wend = self.Nw
                self.Nw_local = self.Nw

        return
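
Note: the sizing above estimates the memory of the complex spectral-function array of shape (NwS, npw, npw), converts it to a number of roughly 1500 MB shares, and then grows that number until it divides the total rank count, so that kcommsize * wcommsize == size. A standalone sketch of the same arithmetic with made-up sizes (not GPAW code):

NwS, npw, size = 400, 600, 64           # illustrative, hypothetical sizes

nbytes_MB = NwS * npw**2 * 16.0 / 1024**2   # complex128 array size in MB
wcommsize = int(nbytes_MB) // 1500 + 1      # ranks needed at ~1500 MB each

kcommsize = size                        # start from pure k-point parallelism
if wcommsize > size // kcommsize:       # matrix too large for a single rank
    while size % wcommsize != 0:        # grow until it divides `size`
        wcommsize += 1
    kcommsize = size // wcommsize
    assert kcommsize * wcommsize == size

print(wcommsize, kcommsize)             # -> 2 32 for these numbers
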
Example #33
0
File: coreeig.py  Project: qsnake/gpaw
#!/usr/bin/env python
from ase import Atoms
from gpaw import GPAW, restart, extra_parameters
from gpaw.test import equal

usenewxc = extra_parameters.get('usenewxc')
extra_parameters['usenewxc'] = True

from gpaw.utilities.kspot import CoreEigenvalues
try:
    a = 7.0
    calc = GPAW(h=0.1)
    system = Atoms('Ne', calculator=calc)
    system.center(vacuum=a / 2)
    e0 = system.get_potential_energy()
    niter0 = calc.get_number_of_iterations()
    calc.write('Ne.gpw')

    del calc, system

    atoms, calc = restart('Ne.gpw')
    calc.restore_state()
    e_j = CoreEigenvalues(calc).get_core_eigenvalues(0)
    assert abs(e_j[0] - (-30.344066)) * 27.21 < 0.1 # Error smaller than 0.1 eV

    energy_tolerance = 0.0004
    equal(e0, -0.0107707223, energy_tolerance)
except:
    extra_parameters['usenewxc'] = usenewxc
    raise
else:
    extra_parameters['usenewxc'] = usenewxc  # restore the flag on success (assumed; the original else body is not shown here)
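
Note: the test above flips extra_parameters['usenewxc'] and restores it in the except branch (and, after the else, presumably on success as well). A minimal sketch, assuming nothing about the GPAW test suite, of the same restore-on-exit pattern written as a context manager:

from contextlib import contextmanager

extra_parameters = {'usenewxc': False}  # stand-in for gpaw.extra_parameters

@contextmanager
def temporarily(params, key, value):
    old = params[key]
    params[key] = value
    try:
        yield
    finally:
        params[key] = old               # restored on success and on error

with temporarily(extra_parameters, 'usenewxc', True):
    assert extra_parameters['usenewxc'] is True
assert extra_parameters['usenewxc'] is False
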
Example #34
0
File: chi.py  Project: qsnake/gpaw
    def initialize(self):

        self.printtxt('')
        self.printtxt('-----------------------------------------')
        self.printtxt('Response function calculation started at:')
        self.starttime = time()
        self.printtxt(ctime())

        BASECHI.initialize(self)

        # Frequency init
        self.dw = None
        if len(self.w_w) == 1:
            self.hilbert_trans = False  # single frequency point: no Hilbert transform

        if self.hilbert_trans:
            self.dw = self.w_w[1] - self.w_w[0]
            assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all() # make sure its linear w grid
            assert self.w_w.max() == self.w_w[-1]
            
            self.dw /= Hartree
            self.w_w  /= Hartree
            self.wmax = self.w_w[-1] 
            self.wcut = self.wmax + 5. / Hartree
            self.Nw  = int(self.wmax / self.dw) + 1
            self.NwS = int(self.wcut / self.dw) + 1
        else:
            self.Nw = len(self.w_w)
            self.NwS = 0
            if len(self.w_w) > 1:
                self.dw = self.w_w[1] - self.w_w[0]
                assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all()
                self.dw /= Hartree
                
        if self.hilbert_trans:
            # for band parallelization.
            for n in range(self.nbands):
                if (self.f_kn[:, n] - self.ftol < 0).all():
                    self.nvalbands = n
                    break
        else:
            # if not hilbert transform, all the bands should be used.
            self.nvalbands = self.nbands

        # Parallelization initialize
        self.parallel_init()

        # Printing calculation information
        self.print_chi()

        if extra_parameters.get('df_dry_run'):
            raise SystemExit

        calc = self.calc

        # For LCAO wfs
        if calc.input_parameters['mode'] == 'lcao':
            calc.initialize_positions()        
        self.printtxt('     GS calculator   : %f M / cpu' %(maxrss() / 1024**2))
        # PAW part init
        # calculate <phi_i | e**(-i(q+G).r) | phi_j>
        # G != 0 part
        self.get_phi_aGp()

        # Calculate ALDA kernel for EELS spectrum
        # Use RPA kernel for Optical spectrum and rpa correlation energy
        if not self.optical_limit and np.dtype(self.w_w[0]) == float:
            R_av = calc.atoms.positions / Bohr
            self.Kxc_GG = calculate_Kxc(self.gd, # global grid
                                    calc.density.nt_sG,
                                    self.npw, self.Gvec_Gc,
                                    self.nG, self.vol,
                                    self.bcell_cv, R_av,
                                    calc.wfs.setups,
                                    calc.density.D_asp)

            self.printtxt('Finished ALDA kernel ! ')
        else:
            self.Kxc_GG = np.zeros((self.npw, self.npw))
            self.printtxt('Use RPA for optical spectrum ! ')
            self.printtxt('')
            
        return
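
Note: the frequency setup above takes a linear grid in eV, converts it to Hartree atomic units, and pads the maximum frequency by 5 eV to obtain the longer grid (NwS points) needed for the Hilbert transform. A short standalone numerical sketch of the same bookkeeping (not GPAW code; the Hartree value in eV is approximate):

import numpy as np

Hartree = 27.211386                     # eV per Hartree (approximate)
w_w = np.linspace(0.0, 10.0, 101)       # linear grid, 0 to 10 eV
dw = w_w[1] - w_w[0]
assert ((w_w[1:] - w_w[:-1] - dw) < 1e-10).all()   # same linearity check as above

dw /= Hartree                           # to atomic units
w_w = w_w / Hartree
wmax = w_w[-1]
wcut = wmax + 5.0 / Hartree             # pad by 5 eV for the Hilbert transform
Nw = int(wmax / dw) + 1                 # points on the output grid
NwS = int(wcut / dw) + 1                # points on the longer spectral grid
assert NwS > Nw
print(Nw, NwS)                          # about 100 and 150 points here
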
Example #35
0
    def iterate_one_k_point(self, hamiltonian, wfs, kpt):
        """Do conjugate gradient iterations for the k-point"""

        niter = self.niter

        phi_G = wfs.empty(q=kpt.q)
        phi_old_G = wfs.empty(q=kpt.q)

        comm = wfs.gd.comm

        psit_nG, Htpsit_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
        # Note that psit_nG is now in self.operator.work1_nG and
        # Htpsit_nG is in kpt.psit_nG!

        R_nG = reshape(self.Htpsit_nG, psit_nG.shape)
        Htphi_G = R_nG[0]

        R_nG[:] = Htpsit_nG
        self.timer.start('Residuals')
        self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG, kpt.P_ani,
                                 kpt.eps_n, R_nG)
        self.timer.stop('Residuals')

        self.timer.start('CG')

        total_error = 0.0
        for n in range(self.nbands):
            if extra_parameters.get('PK', False):
                N = n + 1
            else:
                N = psit_nG.shape[0] + 1
            R_G = R_nG[n]
            Htpsit_G = Htpsit_nG[n]
            gamma_old = 1.0
            phi_old_G[:] = 0.0
            error = np.real(wfs.integrate(R_G, R_G))
            for nit in range(niter):
                if (error * Hartree**2 < self.tolerance / self.nbands):
                    break

                ekin = self.preconditioner.calculate_kinetic_energy(
                    psit_nG[n:n + 1], kpt)

                pR_G = self.preconditioner(R_nG[n:n + 1], kpt, ekin)

                # New search direction
                gamma = comm.sum(np.vdot(pR_G, R_G).real)
                phi_G[:] = -pR_G - gamma / gamma_old * phi_old_G
                gamma_old = gamma
                phi_old_G[:] = phi_G[:]

                # Calculate projections
                P2_ai = wfs.pt.dict()
                wfs.pt.integrate(phi_G, P2_ai, kpt.q)

                # Orthonormalize phi_G to all bands
                self.timer.start('CG: orthonormalize')
                self.timer.start('CG: overlap')
                overlap_n = wfs.integrate(psit_nG[:N],
                                          phi_G,
                                          global_integral=False)
                self.timer.stop('CG: overlap')
                self.timer.start('CG: overlap2')
                for a, P2_i in P2_ai.items():
                    P_ni = kpt.P_ani[a]
                    dO_ii = wfs.setups[a].dO_ii
                    gemv(1.0, P_ni[:N].conjugate(), np.inner(dO_ii, P2_i), 1.0,
                         overlap_n)
                self.timer.stop('CG: overlap2')
                comm.sum(overlap_n)

                # phi_G -= overlap_n * kpt.psit_nG
                wfs.matrixoperator.gd.gemv(-1.0, psit_nG[:N], overlap_n, 1.0,
                                           phi_G, 'n')
                for a, P2_i in P2_ai.items():
                    P_ni = kpt.P_ani[a]
                    gemv(-1.0, P_ni[:N], overlap_n, 1.0, P2_i, 'n')

                norm = wfs.integrate(phi_G, phi_G, global_integral=False)
                for a, P2_i in P2_ai.items():
                    dO_ii = wfs.setups[a].dO_ii
                    norm += np.vdot(P2_i, np.inner(dO_ii, P2_i))
                norm = comm.sum(np.real(norm).item())
                phi_G /= sqrt(norm)
                for P2_i in P2_ai.values():
                    P2_i /= sqrt(norm)
                self.timer.stop('CG: orthonormalize')

                # find optimum linear combination of psit_G and phi_G
                an = kpt.eps_n[n]
                wfs.apply_pseudo_hamiltonian(
                    kpt, hamiltonian, phi_G.reshape((1, ) + phi_G.shape),
                    Htphi_G.reshape((1, ) + Htphi_G.shape))
                b = wfs.integrate(phi_G, Htpsit_G, global_integral=False)
                c = wfs.integrate(phi_G, Htphi_G, global_integral=False)
                for a, P2_i in P2_ai.items():
                    P_i = kpt.P_ani[a][n]
                    dH_ii = unpack(hamiltonian.dH_asp[a][kpt.s])
                    b += dot(P2_i, dot(dH_ii, P_i.conj()))
                    c += dot(P2_i, dot(dH_ii, P2_i.conj()))
                b = comm.sum(np.real(b).item())
                c = comm.sum(np.real(c).item())

                theta = 0.5 * atan2(2 * b, an - c)
                enew = (an * cos(theta)**2 + c * sin(theta)**2 +
                        b * sin(2.0 * theta))
                # theta can correspond to either a minimum or a maximum
                if (enew - kpt.eps_n[n]) > 0.0:  # we were at maximum
                    theta += pi / 2.0
                    enew = (an * cos(theta)**2 + c * sin(theta)**2 +
                            b * sin(2.0 * theta))

                kpt.eps_n[n] = enew
                psit_nG[n] *= cos(theta)
                # kpt.psit_nG[n] += sin(theta) * phi_G
                axpy(sin(theta), phi_G, psit_nG[n])
                for a, P2_i in P2_ai.items():
                    P_i = kpt.P_ani[a][n]
                    P_i *= cos(theta)
                    P_i += sin(theta) * P2_i

                if nit < niter - 1:
                    Htpsit_G *= cos(theta)
                    # Htpsit_G += sin(theta) * Htphi_G
                    axpy(sin(theta), Htphi_G, Htpsit_G)
                    #adjust residuals
                    R_G[:] = Htpsit_G - kpt.eps_n[n] * psit_nG[n]

                    coef_ai = wfs.pt.dict()
                    for a, coef_i in coef_ai.items():
                        P_i = kpt.P_ani[a][n]
                        dO_ii = wfs.setups[a].dO_ii
                        dH_ii = unpack(hamiltonian.dH_asp[a][kpt.s])
                        coef_i[:] = (dot(P_i, dH_ii) -
                                     dot(P_i * kpt.eps_n[n], dO_ii))
                    wfs.pt.add(R_G, coef_ai, kpt.q)
                    error_new = np.real(wfs.integrate(R_G, R_G))
                    if error_new / error < self.rtol:
                        # print >> self.f, "cg:iters", n, nit+1
                        break
                    if (self.nbands_converge == 'occupied'
                            and kpt.f_n is not None and kpt.f_n[n] == 0.0):
                        # print >> self.f, "cg:iters", n, nit+1
                        break
                    error = error_new

            if kpt.f_n is None:
                weight = 1.0
            else:
                weight = kpt.f_n[n]
            if self.nbands_converge != 'occupied':
                weight = kpt.weight * float(n < self.nbands_converge)
            total_error += weight * error
            # if nit == 3:
            #   print >> self.f, "cg:iters", n, nit+1

        self.timer.stop('CG')
        return total_error, psit_nG
Example #36
0
    def parallel_init(self):
        """Parallel initialization. By default, only use kcomm and wcomm.

        Parameters:

            kcomm:
                 kpoint communicator
            wScomm:
                 spectral function communicator
            wcomm:
                 frequency communicator
        """

        if extra_parameters.get("df_dry_run"):
            from gpaw.mpi import DryRunCommunicator

            size = extra_parameters["df_dry_run"]
            world = DryRunCommunicator(size)
            rank = world.rank
            self.comm = world
        else:
            world = self.comm
            rank = self.comm.rank
            size = self.comm.size

        wcommsize = int(self.NwS * self.npw ** 2 * 16.0 / 1024 ** 2) // 1500  # megabyte
        wcommsize += 1
        if size < wcommsize:
            raise ValueError("Number of CPUs is not enough!")
        if self.kcommsize is None:
            self.kcommsize = world.size
        if wcommsize > size // self.kcommsize:  # if matrix too large, overwrite kcommsize and distribute matrix
            self.printtxt("kcommsize is over written ! ")
            while size % wcommsize != 0:
                wcommsize += 1
            self.kcommsize = size // wcommsize
            assert self.kcommsize * wcommsize == size
            if self.kcommsize < 1:
                raise ValueError("Number of CPUs is not enough!")

        self.kcomm, self.wScomm, self.wcomm = set_communicator(world, rank, size, self.kcommsize)

        if self.kd.nbzkpts >= world.size:
            self.nkpt_reshape = self.kd.nbzkpts
            self.nkpt_reshape, self.nkpt_local, self.kstart, self.kend = parallel_partition(
                self.nkpt_reshape, self.kcomm.rank, self.kcomm.size, reshape=True, positive=True
            )
            self.mband_local = self.nvalbands
            self.mlist = np.arange(self.nbands)
        else:
            # fewer k-points than ranks: use band parallelization
            self.nkpt_local = self.kd.nbzkpts
            self.kstart = 0
            self.kend = self.kd.nbzkpts
            self.nkpt_reshape = self.kd.nbzkpts

            self.nbands, self.mband_local, self.mlist = parallel_partition_list(
                self.nbands, self.kcomm.rank, self.kcomm.size
            )

        if self.NwS % size != 0:
            self.NwS -= self.NwS % size

        self.NwS, self.NwS_local, self.wS1, self.wS2 = parallel_partition(
            self.NwS, self.wScomm.rank, self.wScomm.size, reshape=False
        )

        if self.hilbert_trans:
            self.Nw, self.Nw_local, self.wstart, self.wend = parallel_partition(
                self.Nw, self.wcomm.rank, self.wcomm.size, reshape=True
            )
        else:
            if self.Nw > 1:
                #                assert self.Nw % (self.comm.size / self.kcomm.size) == 0
                self.wcomm = self.wScomm
                self.Nw, self.Nw_local, self.wstart, self.wend = parallel_partition(
                    self.Nw, self.wcomm.rank, self.wcomm.size, reshape=False
                )
            else:
                # too few frequency points: don't parallelize
                self.wcomm = serial_comm
                self.wstart = 0
                self.wend = self.Nw
                self.Nw_local = self.Nw

        return
Example #37
0
File: overlap.py  Project: eojons/gpaw-scme
    def orthonormalize(self, wfs, kpt, psit_nG=None):
        """Orthonormalizes the vectors a_nG with respect to the overlap.

        First, a Cholesky factorization C is done for the overlap
        matrix S_nn = <a_nG | S | a_nG> = C*_nn C_nn.  The Cholesky matrix C
        is inverted, and orthonormal vectors a_nG' are obtained as::

          psit_nG' = inv(C_nn) psit_nG
                    __
           ~   _   \    -1   ~   _
          psi (r) = )  C    psi (r)
             n     /__  nm     m
                    m

        Parameters
        ----------

        psit_nG: ndarray, input/output
            On input the set of vectors to orthonormalize,
            on output the overlap-orthonormalized vectors.
        kpt: KPoint object:
            k-point object from kpoint.py.
        work_nG: ndarray
            Optional work array for overlap matrix times psit_nG.
        work_nn: ndarray
            Optional work array for overlap matrix.

        """
        self.timer.start('Orthonormalize')
        if psit_nG is None:
            psit_nG = kpt.psit_nG
        P_ani = kpt.P_ani
        self.timer.start('projections')
        wfs.pt.integrate(psit_nG, P_ani, kpt.q)
        self.timer.stop('projections')

        # Construct the overlap matrix:
        operator = wfs.matrixoperator

        def S(psit_G):
            return psit_G
        
        def dS(a, P_ni):
            return np.dot(P_ni, wfs.setups[a].dO_ii)

        self.timer.start('calc_s_matrix')
        S_nn = operator.calculate_matrix_elements(psit_nG, P_ani, S, dS)
        self.timer.stop('calc_s_matrix')

        orthonormalization_string = repr(self.ksl)
        self.timer.start(orthonormalization_string)
        #
        if extra_parameters.get('sic', False):
            #
            # symmetric Loewdin Orthonormalization
            tri2full(S_nn, UL='L', map=np.conj)
            nrm_n = np.empty(S_nn.shape[0])
            diagonalize(S_nn, nrm_n)
            nrm_nn = np.diag(1.0/np.sqrt(nrm_n))
            S_nn = np.dot(np.dot(S_nn.T.conj(), nrm_nn), S_nn)
        else:
            #
            self.ksl.inverse_cholesky(S_nn)
        # S_nn now contains the inverse of the Cholesky factorization.
        # Let's call it something different:
        C_nn = S_nn
        del S_nn
        self.timer.stop(orthonormalization_string)

        self.timer.start('rotate_psi')
        operator.matrix_multiply(C_nn, psit_nG, P_ani, out_nG=kpt.psit_nG)
        self.timer.stop('rotate_psi')
        self.timer.stop('Orthonormalize')
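
Note: both orthonormalization routes chosen above can be reproduced with plain numpy, ignoring the PAW correction dS: the default branch multiplies by the inverse Cholesky factor of the overlap matrix, while the 'sic' branch multiplies by its symmetric (Loewdin) inverse square root. A compact illustrative sketch, not GPAW code:

import numpy as np

rng = np.random.default_rng(1)
psit_nG = rng.normal(size=(4, 50))      # four non-orthogonal 'bands'
S_nn = psit_nG @ psit_nG.T              # overlap matrix (real, positive definite)

# Inverse Cholesky (default branch): S = L L^T, psit' = inv(L) psit
L = np.linalg.cholesky(S_nn)
psit1_nG = np.linalg.solve(L, psit_nG)
assert np.allclose(psit1_nG @ psit1_nG.T, np.eye(4))

# Symmetric Loewdin (the 'sic' branch): psit'' = S^(-1/2) psit
eps_n, U_nn = np.linalg.eigh(S_nn)
S_inv_sqrt = U_nn @ np.diag(1.0 / np.sqrt(eps_n)) @ U_nn.T
psit2_nG = S_inv_sqrt @ psit_nG
assert np.allclose(psit2_nG @ psit2_nG.T, np.eye(4))
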
Example #38
0
from ase import Atoms
from gpaw import GPAW, restart, extra_parameters
from gpaw.test import equal

usenewxc = extra_parameters.get('usenewxc')
extra_parameters['usenewxc'] = True
from gpaw.utilities.kspot import AllElectronPotential
try:
    if 1:
        be = Atoms(symbols='Be', positions=[(0, 0, 0)])
        be.center(vacuum=5)
        calc = GPAW(gpts=(64, 64, 64), xc='LDA',
                    nbands=1)  #0.1 required for accuracy
        be.set_calculator(calc)
        e = be.get_potential_energy()
        niter = calc.get_number_of_iterations()
        #calc.write("be.gpw")

        energy_tolerance = 0.00001
        niter_tolerance = 0
        equal(e, 0.00246471, energy_tolerance)
        equal(niter, 16, niter_tolerance)

    #be, calc = restart("be.gpw")
    AllElectronPotential(calc).write_spherical_ks_potentials('bepot.txt')
    f = open('bepot.txt')
    lines = f.readlines()
    f.close()
    mmax = 0
    for l in lines:
        mmax = max(abs(float(l.split(' ')[3])), mmax)
Example #39
0
    def orthonormalize(self, wfs, kpt, psit_nG=None):
        """Orthonormalizes the vectors a_nG with respect to the overlap.

        First, a Cholesky factorization C is done for the overlap
        matrix S_nn = <a_nG | S | a_nG> = C*_nn C_nn.  The Cholesky matrix C
        is inverted, and orthonormal vectors a_nG' are obtained as::

          psit_nG' = inv(C_nn) psit_nG
                    __
           ~   _   \    -1   ~   _
          psi (r) = )  C    psi (r)
             n     /__  nm     m
                    m

        Parameters
        ----------

        psit_nG: ndarray, input/output
            On input the set of vectors to orthonormalize,
            on output the overlap-orthonormalized vectors.
        kpt: KPoint object:
            k-point object from kpoint.py.
        work_nG: ndarray
            Optional work array for overlap matrix times psit_nG.
        work_nn: ndarray
            Optional work array for overlap matrix.

        """
        self.timer.start('Orthonormalize')
        if psit_nG is None:
            psit_nG = kpt.psit_nG
        P_ani = kpt.P_ani
        self.timer.start('projections')
        wfs.pt.integrate(psit_nG, P_ani, kpt.q)
        self.timer.stop('projections')

        # Construct the overlap matrix:
        operator = wfs.matrixoperator

        def S(psit_G):
            return psit_G
        
        def dS(a, P_ni):
            return np.dot(P_ni, wfs.setups[a].dO_ii)

        self.timer.start('calc_s_matrix')
        S_nn = operator.calculate_matrix_elements(psit_nG, P_ani, S, dS)
        self.timer.stop('calc_s_matrix')

        orthonormalization_string = repr(self.ksl)
        self.timer.start(orthonormalization_string)
        #
        if extra_parameters.get('sic', False):
            #
            # symmetric Loewdin Orthonormalization
            tri2full(S_nn, UL='L', map=np.conj)
            nrm_n = np.empty(S_nn.shape[0])
            diagonalize(S_nn, nrm_n)
            nrm_nn = np.diag(1.0/np.sqrt(nrm_n))
            S_nn = np.dot(np.dot(S_nn.T.conj(), nrm_nn), S_nn)
        else:
            #
            self.ksl.inverse_cholesky(S_nn)
        # S_nn now contains the transformation matrix: the inverse Cholesky
        # factor, or S^(-1/2) in the Loewdin (SIC) branch.  Give it a new name:
        C_nn = S_nn
        del S_nn
        self.timer.stop(orthonormalization_string)

        self.timer.start('rotate_psi')
        operator.matrix_multiply(C_nn, psit_nG, P_ani, out_nG=kpt.psit_nG)
        self.timer.stop('rotate_psi')
        self.timer.stop('Orthonormalize')
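The docstring above describes the Cholesky route; the 'sic' branch uses symmetric Loewdin orthonormalization instead. Below is a minimal NumPy sketch of both, written for real-valued vectors so that no conjugation bookkeeping is needed (this is just the linear algebra, not the GPAW API):

import numpy as np

def overlap(psit_nG):
    # S_nm = <psit_n | psit_m> on a uniform grid (unit volume element assumed).
    return psit_nG @ psit_nG.T

def orthonormalize_cholesky(psit_nG):
    S_nn = overlap(psit_nG)
    L_nn = np.linalg.cholesky(S_nn)        # S = L L^T
    return np.linalg.solve(L_nn, psit_nG)  # psit' = inv(L) psit

def orthonormalize_loewdin(psit_nG):
    S_nn = overlap(psit_nG)
    eps_n, U_nn = np.linalg.eigh(S_nn)
    S_inv_sqrt_nn = U_nn @ np.diag(eps_n**-0.5) @ U_nn.T  # S^(-1/2)
    return S_inv_sqrt_nn @ psit_nG

psit_nG = np.random.default_rng(42).standard_normal((4, 200))
for method in (orthonormalize_cholesky, orthonormalize_loewdin):
    assert np.allclose(overlap(method(psit_nG)), np.eye(4))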
Example #40
File: vdw.py Project: robwarm/gpaw-symm
    def calculate_6d_integral(self, n_g, q0_g, a2_g=None, e_LDAc_g=None, v_LDAc_g=None, v_g=None, deda2_g=None):
        self.timer.start("VdW-DF integral")
        self.timer.start("splines")
        if self.C_aip is None:
            self.construct_cubic_splines()
            self.construct_fourier_transformed_kernels()
        self.timer.stop("splines")

        gd = self.gd
        N = self.Nalpha

        world = self.world
        vdwcomm = self.vdwcomm

        if self.alphas:
            self.timer.start("hmm1")
            i_g = (np.log(q0_g / self.q_a[1] * (self.lambd - 1) + 1) / log(self.lambd)).astype(int)
            dq0_g = q0_g - self.q_a[i_g]
            self.timer.stop("hmm1")
        else:
            i_g = None
            dq0_g = None

        if self.verbose:
            print "VDW: fft:",

        theta_ak = {}
        p_ag = {}
        for a in self.alphas:
            self.timer.start("hmm2")
            C_pg = self.C_aip[a, i_g].transpose((3, 0, 1, 2))
            pa_g = C_pg[0] + dq0_g * (C_pg[1] + dq0_g * (C_pg[2] + dq0_g * C_pg[3]))
            self.timer.stop("hmm2")
            del C_pg
            self.timer.start("FFT")
            theta_ak[a] = rfftn(n_g * pa_g, self.shape).copy()
            if extra_parameters.get("vdw0"):
                theta_ak[a][0, 0, 0] = 0.0
            self.timer.stop()

            if not self.energy_only:
                p_ag[a] = pa_g
            del pa_g
            if self.verbose:
                print(a, end=' ')
                sys.stdout.flush()

        if self.energy_only:
            del i_g
            del dq0_g

        if self.verbose:
            print()
            print('VDW: convolution:', end=' ')

        F_ak = {}
        dj_k = self.dj_k
        energy = 0.0
        for a in range(N):
            if vdwcomm is not None:
                vdw_ranka = a * vdwcomm.size // N
                F_k = np.zeros((self.shape[0], self.shape[1], self.shape[2] // 2 + 1), complex)
            self.timer.start("Convolution")
            for b in self.alphas:
                _gpaw.vdw2(self.phi_aajp[a, b], self.j_k, dj_k, theta_ak[b], F_k)
            self.timer.stop()

            if vdwcomm is not None:
                self.timer.start("gather")
                for F in F_k:
                    vdwcomm.sum(F, vdw_ranka)
                # vdwcomm.sum(F_k, vdw_ranka)
                self.timer.stop("gather")

            if vdwcomm is not None and vdwcomm.rank == vdw_ranka:
                if not self.energy_only:
                    F_ak[a] = F_k
                energy += np.vdot(theta_ak[a][:, :, 0], F_k[:, :, 0]).real
                energy += np.vdot(theta_ak[a][:, :, -1], F_k[:, :, -1]).real
                energy += 2 * np.vdot(theta_ak[a][:, :, 1:-1], F_k[:, :, 1:-1]).real

            if self.verbose:
                print(a, end=' ')
                sys.stdout.flush()

        del theta_ak

        if self.verbose:
            print()

        if not self.energy_only:
            F_ag = {}
            for a in self.alphas:
                n1, n2, n3 = gd.get_size_of_global_array()
                self.timer.start("iFFT")
                F_ag[a] = irfftn(F_ak[a]).real[:n1, :n2, :n3].copy()
                self.timer.stop()
            del F_ak

            self.timer.start("potential")
            self.calculate_potential(n_g, a2_g, i_g, dq0_g, p_ag, F_ag, e_LDAc_g, v_LDAc_g, v_g, deda2_g)
            self.timer.stop()

        self.timer.stop()
        return 0.5 * world.sum(energy) * gd.dv / self.shape.prod()
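The energy accumulation above adds the k3 = 0 and k3 = -1 (Nyquist) planes once and all interior planes twice, because rfftn stores only half of the reciprocal-space points of a real field. A small standalone check of that bookkeeping against the full FFT (a sketch assuming an even last dimension, as the slicing above does):

import numpy as np
from numpy.fft import fftn, rfftn

f_g = np.random.default_rng(1).standard_normal((6, 6, 6))
F_k = rfftn(f_g)                                  # half spectrum of a real field
full = np.vdot(fftn(f_g), fftn(f_g)).real         # sum of |F|^2 over all k
half = (np.vdot(F_k[:, :, 0], F_k[:, :, 0]).real +
        np.vdot(F_k[:, :, -1], F_k[:, :, -1]).real +
        2 * np.vdot(F_k[:, :, 1:-1], F_k[:, :, 1:-1]).real)
assert np.allclose(full, half)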
Example #41
File: kspot.py Project: qsnake/gpaw
         print(a)

      t('Calculated core eigenvalues of atom '+str(a)+':'+symbol)
      t('state      eigenvalue         ekin         rmax')
      t('-----------------------------------------------')
      for m, l, f, e, u in zip(atom.n_j, atom.l_j, atom.f_j, atom.e_j, atom.u_j):
         # Find kinetic energy:
         k = e - np.sum((np.where(abs(u) < 1e-160, 0, u)**2 * #XXXNumeric!
                            atom.vr * atom.dr)[1:] / atom.r[1:])

         # Find outermost maximum:
         g = atom.N - 4
         while u[g - 1] >= u[g]:
            g -= 1
         x = atom.r[g - 1:g + 2]
         y = u[g - 1:g + 2]
         A = np.transpose(np.array([x**i for i in range(3)]))
         c, b, a = np.linalg.solve(A, y)
         assert a < 0.0
         rmax = -0.5 * b / a

         s = 'spdf'[l]
         t('%d%s^%-4.1f: %12.6f %12.6f %12.3f' % (m, s, f, e, k, rmax))
      t('-----------------------------------------------')
      t('(units: Bohr and Hartree)')
      return atom.e_j
      
if not extra_parameters.get('usenewxc'):
    raise "New XC-corrections required. Add --gpaw usenewxc=1 to command line and try again."

Example #42
class Density:
    """Density object.
    
    Attributes:
     =============== =====================================================
     ``gd``          Grid descriptor for coarse grids.
     ``finegd``      Grid descriptor for fine grids.
     ``interpolate`` Function for interpolating the electron density.
     ``mixer``       ``DensityMixer`` object.
     =============== =====================================================

    Soft and smooth pseudo functions on uniform 3D grids:
     ========== =========================================
     ``nt_sG``  Electron density on the coarse grid.
     ``nt_sg``  Electron density on the fine grid.
     ``nt_g``   Electron density on the fine grid.
     ``rhot_g`` Charge density on the fine grid.
     ``nct_G``  Core electron-density on the coarse grid.
     ========== =========================================
    """
    
    def __init__(self, gd, finegd, nspins, charge):
        """Create the Density object."""

        self.gd = gd
        self.finegd = finegd
        self.nspins = nspins
        self.charge = float(charge)

        self.charge_eps = 1e-7
        
        self.D_asp = None
        self.Q_aL = None

        self.nct_G = None
        self.nt_sG = None
        self.rhot_g = None
        self.nt_sg = None
        self.nt_g = None

        self.rank_a = None

        self.mixer = BaseMixer()
        self.timer = nulltimer
        self.allocated = False
        
    def initialize(self, setups, stencil, timer, magmom_a, hund):
        self.timer = timer
        self.setups = setups
        self.hund = hund
        self.magmom_a = magmom_a
        
        # Interpolation function for the density:
        self.interpolator = Transformer(self.gd, self.finegd, stencil,
                                        allocate=False)
        
        spline_aj = []
        for setup in setups:
            if setup.nct is None:
                spline_aj.append([])
            else:
                spline_aj.append([setup.nct])
        self.nct = LFC(self.gd, spline_aj,
                       integral=[setup.Nct for setup in setups],
                       forces=True, cut=True)
        self.ghat = LFC(self.finegd, [setup.ghat_l for setup in setups],
                        integral=sqrt(4 * pi), forces=True)
        if self.allocated:
            self.allocated = False
            self.allocate()

    def allocate(self):
        assert not self.allocated
        self.interpolator.allocate()
        self.allocated = True

    def reset(self):
        # TODO: reset other parameters?
        self.nt_sG = None

    def set_positions(self, spos_ac, rank_a=None):
        if not self.allocated:
            self.allocate()
        self.nct.set_positions(spos_ac)
        self.ghat.set_positions(spos_ac)
        self.mixer.reset()

        self.nct_G = self.gd.zeros()
        self.nct.add(self.nct_G, 1.0 / self.nspins)
        #self.nt_sG = None
        self.nt_sg = None
        self.nt_g = None
        self.rhot_g = None
        self.Q_aL = None

        # If both old and new atomic ranks are present, start with a blank
        # dict if one didn't exist previously but will be needed for the new
        # atoms.
        if (self.rank_a is not None and rank_a is not None and
            self.D_asp is None and (rank_a == self.gd.comm.rank).any()):
            self.D_asp = {}

        if self.rank_a is not None and self.D_asp is not None:
            self.timer.start('Redistribute')
            requests = []
            flags = (self.rank_a != rank_a)
            my_incoming_atom_indices = np.argwhere(np.bitwise_and(flags, \
                rank_a == self.gd.comm.rank)).ravel()
            my_outgoing_atom_indices = np.argwhere(np.bitwise_and(flags, \
                self.rank_a == self.gd.comm.rank)).ravel()

            for a in my_incoming_atom_indices:
                # Get matrix from old domain:
                ni = self.setups[a].ni
                D_sp = np.empty((self.nspins, ni * (ni + 1) // 2))
                requests.append(self.gd.comm.receive(D_sp, self.rank_a[a],
                                                     tag=a, block=False))
                assert a not in self.D_asp
                self.D_asp[a] = D_sp

            for a in my_outgoing_atom_indices:
                # Send matrix to new domain:
                D_sp = self.D_asp.pop(a)
                requests.append(self.gd.comm.send(D_sp, rank_a[a],
                                                  tag=a, block=False))
            self.gd.comm.waitall(requests)
            self.timer.stop('Redistribute')

        self.rank_a = rank_a

    def calculate_pseudo_density(self, wfs):
        """Calculate nt_sG from scratch.

        nt_sG will be equal to nct_G plus the contribution from
        wfs.add_to_density().
        """
        wfs.calculate_density_contribution(self.nt_sG)
        self.nt_sG += self.nct_G

    def update(self, wfs):
        self.timer.start('Density')
        self.timer.start('Pseudo density')
        self.calculate_pseudo_density(wfs)
        self.timer.stop('Pseudo density')
        self.timer.start('Atomic density matrices')
        wfs.calculate_atomic_density_matrices(self.D_asp)
        self.timer.stop('Atomic density matrices')
        self.timer.start('Multipole moments')
        comp_charge = self.calculate_multipole_moments()
        self.timer.stop('Multipole moments')
        
        if isinstance(wfs, LCAOWaveFunctions):
            self.timer.start('Normalize')
            self.normalize(comp_charge)
            self.timer.stop('Normalize')

        self.timer.start('Mix')
        self.mix(comp_charge)
        self.timer.stop('Mix')
        self.timer.stop('Density')

    def normalize(self, comp_charge=None):
        """Normalize pseudo density."""
        if comp_charge is None:
            comp_charge = self.calculate_multipole_moments()
        
        pseudo_charge = self.gd.integrate(self.nt_sG).sum()

        if pseudo_charge + self.charge + comp_charge != 0:
            if pseudo_charge != 0:
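                # Scale nt_sG so that pseudo_charge + self.charge + comp_charge
                # becomes zero: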
                x = -(self.charge + comp_charge) / pseudo_charge
                self.nt_sG *= x
            else:
                # Use homogeneous background:
                self.nt_sG[:] = (self.charge + comp_charge) * self.gd.dv

    def calculate_pseudo_charge(self, comp_charge):
        self.nt_g = self.nt_sg.sum(axis=0)
        self.rhot_g = self.nt_g.copy()
        self.ghat.add(self.rhot_g, self.Q_aL)

        if debug:
            charge = self.finegd.integrate(self.rhot_g) + self.charge
            if abs(charge) > self.charge_eps:
                raise RuntimeError('Charge not conserved: excess=%.9f' %
                                   charge)

    def mix(self, comp_charge):
        if not self.mixer.mix_rho:
            self.mixer.mix(self)
            comp_charge = None
          
        self.interpolate(comp_charge)
        self.calculate_pseudo_charge(comp_charge)

        if self.mixer.mix_rho:
            self.mixer.mix(self)

    def interpolate(self, comp_charge=None):
        """Interpolate pseudo density to fine grid."""
        if comp_charge is None:
            comp_charge = self.calculate_multipole_moments()

        if self.nt_sg is None:
            self.nt_sg = self.finegd.empty(self.nspins)

        for s in range(self.nspins):
            self.interpolator.apply(self.nt_sG[s], self.nt_sg[s])

        # With periodic boundary conditions, the interpolation will
        # conserve the number of electrons.
        if not self.gd.pbc_c.all():
            # With zero-boundary conditions in one or more directions,
            # this is not the case.
            pseudo_charge = -(self.charge + comp_charge)
            if abs(pseudo_charge) > 1.0e-14:
                x = pseudo_charge / self.finegd.integrate(self.nt_sg).sum()
                self.nt_sg *= x

    def calculate_multipole_moments(self):
        """Calculate multipole moments of compensation charges.

        Returns the total compensation charge in units of electron
        charge, so the number will be negative because of the
        dominating contribution from the nuclear charge."""

        comp_charge = 0.0
        self.Q_aL = {}
        for a, D_sp in self.D_asp.items():
            Q_L = self.Q_aL[a] = np.dot(D_sp.sum(0), self.setups[a].Delta_pL)
            Q_L[0] += self.setups[a].Delta0
            comp_charge += Q_L[0]
        return self.gd.comm.sum(comp_charge) * sqrt(4 * pi)

    def initialize_from_atomic_densities(self, basis_functions):
        """Initialize D_asp, nt_sG and Q_aL from atomic densities.

        nt_sG is initialized from atomic orbitals, and will
        be constructed with the specified magnetic moments and
        obeying Hund's rules if ``hund`` is true."""

        # XXX does this work with blacs?  What should be distributed?
        # Apparently this doesn't use blacs at all, so it's serial
        # with respect to the blacs distribution.  That means it works
        # but is not particularly efficient (not that this is a
        # time-consuming step).

        f_sM = np.empty((self.nspins, basis_functions.Mmax))
        self.D_asp = {}
        f_asi = {}
        for a in basis_functions.atom_indices:
            c = self.charge / len(self.setups)  # distribute on all atoms
            f_si = self.setups[a].calculate_initial_occupation_numbers(
                    self.magmom_a[a], self.hund, charge=c, nspins=self.nspins)
            if a in basis_functions.my_atom_indices:
                self.D_asp[a] = self.setups[a].initialize_density_matrix(f_si)
            f_asi[a] = f_si

        self.nt_sG = self.gd.zeros(self.nspins)
        basis_functions.add_to_density(self.nt_sG, f_asi)
        self.nt_sG += self.nct_G
        self.calculate_normalized_charges_and_mix()

    def initialize_from_wavefunctions(self, wfs):
        """Initialize D_asp, nt_sG and Q_aL from wave functions."""
        self.nt_sG = self.gd.empty(self.nspins)
        self.calculate_pseudo_density(wfs)
        self.D_asp = {}
        my_atom_indices = np.argwhere(wfs.rank_a == self.gd.comm.rank).ravel()
        for a in my_atom_indices:
            ni = self.setups[a].ni
            self.D_asp[a] = np.empty((self.nspins, ni * (ni + 1) // 2))
        wfs.calculate_atomic_density_matrices(self.D_asp)
        self.calculate_normalized_charges_and_mix()

    def initialize_directly_from_arrays(self, nt_sG, D_asp):
        """Set D_asp and nt_sG directly."""
        self.nt_sG = nt_sG
        self.D_asp = D_asp
        #self.calculate_normalized_charges_and_mix()
        # No call to calculate_multipole_moments()?  Tests will fail because
        # of an improperly initialized mixer.

    def calculate_normalized_charges_and_mix(self):
        comp_charge = self.calculate_multipole_moments()
        self.normalize(comp_charge)
        self.mix(comp_charge)

    def set_mixer(self, mixer):
        if mixer is not None:
            if self.nspins == 1 and isinstance(mixer, MixerSum):
                raise RuntimeError('Cannot use MixerSum with nspins==1')
            self.mixer = mixer
        else:
            if self.gd.pbc_c.any():
                beta = 0.1
                weight = 50.0
            else:
                beta = 0.25
                weight = 1.0
                
            if self.nspins == 2:
                self.mixer = MixerSum(beta=beta, weight=weight)
            else:
                self.mixer = Mixer(beta=beta, weight=weight)

        self.mixer.initialize(self)
        
    def estimate_magnetic_moments(self):
        magmom_a = np.zeros_like(self.magmom_a)
        if self.nspins == 2:
            for a, D_sp in self.D_asp.items():
                magmom_a[a] = np.dot(D_sp[0] - D_sp[1], self.setups[a].N0_p)
            self.gd.comm.sum(magmom_a)
        return magmom_a

    def get_correction(self, a, spin):
        """Integrated atomic density correction.

        Get the integrated correction to the pseudo density relative to
        the all-electron density.
        """
        setup = self.setups[a]
        return sqrt(4 * pi) * (
            np.dot(self.D_asp[a][spin], setup.Delta_pL[:, 0])
            + setup.Delta0 / self.nspins)

    def get_density_array(self):
        XXX
        # XXX why not replace with get_spin_density and get_total_density?
        """Return pseudo-density array."""
        if self.nspins == 2:
            return self.nt_sG
        else:
            return self.nt_sG[0]
    
    def get_all_electron_density(self, atoms, gridrefinement=2):
        """Return real all-electron density array."""

        # Refinement of coarse grid, for representation of the AE-density
        if gridrefinement == 1:
            gd = self.gd
            n_sg = self.nt_sG.copy()
        elif gridrefinement == 2:
            gd = self.finegd
            if self.nt_sg is None:
                self.interpolate()
            n_sg = self.nt_sg.copy()
        elif gridrefinement == 4:
            # Extra fine grid
            gd = self.finegd.refine()
            
            # Interpolation function for the density:
            interpolator = Transformer(self.finegd, gd, 3)

            # Transfer the pseudo-density to the fine grid:
            n_sg = gd.empty(self.nspins)
            if self.nt_sg is None:
                self.interpolate()
            for s in range(self.nspins):
                interpolator.apply(self.nt_sg[s], n_sg[s])
        else:
            raise NotImplementedError

        # Add corrections to pseudo-density to get the AE-density
        splines = {}
        phi_aj = []
        phit_aj = []
        nc_a = []
        nct_a = []
        for a, id in enumerate(self.setups.id_a):
            if id in splines:
                phi_j, phit_j, nc, nct = splines[id]
            else:
                # Load splines:
                phi_j, phit_j, nc, nct = self.setups[a].get_partial_waves()[:4]
                splines[id] = (phi_j, phit_j, nc, nct)
            phi_aj.append(phi_j)
            phit_aj.append(phit_j)
            nc_a.append([nc])
            nct_a.append([nct])

        # Create localized functions from splines
        phi = LFC(gd, phi_aj)
        phit = LFC(gd, phit_aj)
        nc = LFC(gd, nc_a)
        nct = LFC(gd, nct_a)
        spos_ac = atoms.get_scaled_positions() % 1.0
        phi.set_positions(spos_ac)
        phit.set_positions(spos_ac)
        nc.set_positions(spos_ac)
        nct.set_positions(spos_ac)

        all_D_asp = []
        for a, setup in enumerate(self.setups):
            D_sp = self.D_asp.get(a)
            if D_sp is None:
                ni = setup.ni
                D_sp = np.empty((self.nspins, ni * (ni + 1) // 2))
            if gd.comm.size > 1:
                gd.comm.broadcast(D_sp, self.rank_a[a])
            all_D_asp.append(D_sp)

        for s in range(self.nspins):
            I_a = np.zeros(len(atoms))
            nc.add1(n_sg[s], 1.0 / self.nspins, I_a)
            nct.add1(n_sg[s], -1.0 / self.nspins, I_a)
            phi.add2(n_sg[s], all_D_asp, s, 1.0, I_a)
            phit.add2(n_sg[s], all_D_asp, s, -1.0, I_a)
            for a, D_sp in self.D_asp.items():
                setup = self.setups[a]
                I_a[a] -= ((setup.Nc - setup.Nct) / self.nspins +
                           sqrt(4 * pi) *
                           np.dot(D_sp[s], setup.Delta_pL[:, 0]))
            gd.comm.sum(I_a)
            N_c = gd.N_c
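            # Nearest grid point of each atom, in this domain's local grid
            # coordinates: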
            g_ac = np.around(N_c * spos_ac).astype(int) % N_c - gd.beg_c
            for I, g_c in zip(I_a, g_ac):
                if (g_c >= 0).all() and (g_c < gd.n_c).all():
                    n_sg[s][tuple(g_c)] -= I / gd.dv

        return n_sg, gd

    def new_get_all_electron_density(self, atoms, gridrefinement=2):
        """Return real all-electron density array."""

        # Refinement of coarse grid, for representation of the AE-density
        if gridrefinement == 1:
            gd = self.gd
            n_sg = self.nt_sG.copy()
        elif gridrefinement == 2:
            gd = self.finegd
            if self.nt_sg is None:
                self.interpolate()
            n_sg = self.nt_sg.copy()
        elif gridrefinement == 4:
            # Extra fine grid
            gd = self.finegd.refine()
            
            # Interpolation function for the density:
            interpolator = Transformer(self.finegd, gd, 3)

            # Transfer the pseudo-density to the fine grid:
            n_sg = gd.empty(self.nspins)
            if self.nt_sg is None:
                self.interpolate()
            for s in range(self.nspins):
                interpolator.apply(self.nt_sg[s], n_sg[s])
        else:
            raise NotImplementedError

        # Add corrections to pseudo-density to get the AE-density
        splines = {}
        phi_aj = []
        phit_aj = []
        nc_a = []
        nct_a = []
        for a, id in enumerate(self.setups.id_a):
            if id in splines:
                phi_j, phit_j, nc, nct = splines[id]
            else:
                # Load splines:
                phi_j, phit_j, nc, nct = self.setups[a].get_partial_waves()[:4]
                splines[id] = (phi_j, phit_j, nc, nct)
            phi_aj.append(phi_j)
            phit_aj.append(phit_j)
            nc_a.append([nc])
            nct_a.append([nct])

        # Create localized functions from splines
        phi = BasisFunctions(gd, phi_aj)
        phit = BasisFunctions(gd, phit_aj)
        nc = LFC(gd, nc_a)
        nct = LFC(gd, nct_a)
        spos_ac = atoms.get_scaled_positions() % 1.0
        phi.set_positions(spos_ac)
        phit.set_positions(spos_ac)
        nc.set_positions(spos_ac)
        nct.set_positions(spos_ac)

        I_sa = np.zeros((self.nspins, len(atoms)))
        a_W =  np.empty(len(phi.M_W), np.int32)
        W = 0
        for a in phi.atom_indices:
            nw = len(phi.sphere_a[a].M_w)
            a_W[W:W + nw] = a
            W += nw
        rho_MM = np.zeros((phi.Mmax, phi.Mmax))
        for s, I_a in enumerate(I_sa):
            M1 = 0
            for a, setup in enumerate(self.setups):
                ni = setup.ni
                D_sp = self.D_asp.get(a)
                if D_sp is None:
                    D_sp = np.empty((self.nspins, ni * (ni + 1) // 2))
                else:
                    I_a[a] = ((setup.Nct - setup.Nc) / self.nspins -
                              sqrt(4 * pi) *
                              np.dot(D_sp[s], setup.Delta_pL[:, 0]))
                if gd.comm.size > 1:
                    gd.comm.broadcast(D_sp, self.rank_a[a])
                M2 = M1 + ni
                rho_MM[M1:M2, M1:M2] = unpack2(D_sp[s])
                M1 = M2

            phi.lfc.ae_valence_density_correction(rho_MM, n_sg[s], a_W, I_a)
            phit.lfc.ae_valence_density_correction(-rho_MM, n_sg[s], a_W, I_a)

        a_W =  np.empty(len(nc.M_W), np.int32)
        W = 0
        for a in nc.atom_indices:
            nw = len(nc.sphere_a[a].M_w)
            a_W[W:W + nw] = a
            W += nw
        scale = 1.0 / self.nspins
        for s, I_a in enumerate(I_sa):
            nc.lfc.ae_core_density_correction(scale, n_sg[s], a_W, I_a)
            nct.lfc.ae_core_density_correction(-scale, n_sg[s], a_W, I_a)
            gd.comm.sum(I_a)
            N_c = gd.N_c
            g_ac = np.around(N_c * spos_ac).astype(int) % N_c - gd.beg_c
            for I, g_c in zip(I_a, g_ac):
                if (g_c >= 0).all() and (g_c < gd.n_c).all():
                    n_sg[s][tuple(g_c)] -= I / gd.dv
        return n_sg, gd

    if extra_parameters.get('usenewlfc', True):
        get_all_electron_density = new_get_all_electron_density
        
    def estimate_memory(self, mem):
        nspins = self.nspins
        nbytes = self.gd.bytecount()
        nfinebytes = self.finegd.bytecount()

        arrays = mem.subnode('Arrays')
        for name, size in [('nt_sG', nbytes * nspins),
                           ('nt_sg', nfinebytes * nspins),
                           ('nt_g', nfinebytes),
                           ('rhot_g', nfinebytes),
                           ('nct_G', nbytes)]:
            arrays.subnode(name, size)

        lfs = mem.subnode('Localized functions')
        for name, obj in [('nct', self.nct),
                          ('ghat', self.ghat)]:
            obj.estimate_memory(lfs.subnode(name))
        self.mixer.estimate_memory(mem.subnode('Mixer'), self.gd)

        # TODO
        # The implementation of interpolator memory use is not very
        # accurate; 20 MiB vs 13 MiB estimated in one example, probably
        # worse for parallel calculations.
        
        self.interpolator.estimate_memory(mem.subnode('Interpolator'))

    def get_spin_contamination(self, atoms, majority_spin=0):
        """Calculate the spin contamination.

        Spin contamination is defined as the integral of the spin-density
        difference over the regions where it is negative (i.e. where the
        minority-spin density is larger than the majority-spin density).
        """

        if majority_spin == 0:
            smaj = 0
            smin = 1
        else:
            smaj = 1
            smin = 0
        nt_sg, gd = self.get_all_electron_density(atoms)
        dt_sg = nt_sg[smin] - nt_sg[smaj]
        dt_sg = np.where(dt_sg > 0, dt_sg, 0.0)
        return gd.integrate(dt_sg)
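For reference, the same quantity on a bare NumPy grid (a sketch only; dv is the volume per grid point):

import numpy as np

def spin_contamination(n_majority_g, n_minority_g, dv):
    # Integrate the spin-density difference only where the minority
    # density exceeds the majority density.
    dt_g = n_minority_g - n_majority_g
    return np.where(dt_g > 0.0, dt_g, 0.0).sum() * dv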
Example #43
    def print_chi(self, pd):
        calc = self.calc
        gd = calc.wfs.gd

        if extra_parameters.get('df_dry_run'):
            from gpaw.mpi import DryRunCommunicator
            size = extra_parameters['df_dry_run']
            world = DryRunCommunicator(size)
        else:
            world = self.world

        print('%s' % ctime(), file=self.fd)
        print('Called response.chi0.calculate with', file=self.fd)

        q_c = pd.kd.bzk_kc[0]
        print('    q_c: [%f, %f, %f]' % (q_c[0], q_c[1], q_c[2]), file=self.fd)

        nw = len(self.omega_w)
        print('    Number of frequency points: %d' % nw, file=self.fd)

        ecut = self.ecut * Hartree
        print('    Planewave cutoff: %f' % ecut, file=self.fd)

        ns = calc.wfs.nspins
        print('    Number of spins: %d' % ns, file=self.fd)

        nbands = self.nbands
        print('    Number of bands: %d' % nbands, file=self.fd)

        nk = calc.wfs.kd.nbzkpts
        print('    Number of kpoints: %d' % nk, file=self.fd)

        nik = calc.wfs.kd.nibzkpts
        print('    Number of irreducible kpoints: %d' % nik, file=self.fd)

        ngmax = pd.ngmax
        print('    Number of planewaves: %d' % ngmax, file=self.fd)

        eta = self.eta * Hartree
        print('    Broadening (eta): %f' % eta, file=self.fd)

        wsize = world.size
        print('    world.size: %d' % wsize, file=self.fd)

        knsize = self.kncomm.size
        print('    kncomm.size: %d' % knsize, file=self.fd)

        bsize = self.blockcomm.size
        print('    blockcomm.size: %d' % bsize, file=self.fd)

        nocc = self.nocc1
        print('    Number of completely occupied states: %d' % nocc,
              file=self.fd)

        npocc = self.nocc2
        print('    Number of partially occupied states: %d' % npocc,
              file=self.fd)

        keep = self.keep_occupied_states
        print('    Keep occupied states: %s' % keep, file=self.fd)

        print('', file=self.fd)
        print('    Memory estimate of potentially large arrays:', file=self.fd)

        chisize = nw * pd.ngmax**2 * 16. / 1024**2
        print('        chi0_wGG: %f M / cpu' % chisize, file=self.fd)

        ngridpoints = gd.N_c[0] * gd.N_c[1] * gd.N_c[2]

        if self.keep_occupied_states:
            nstat = (ns * nk * npocc + world.size - 1) // world.size
        else:
            nstat = (ns * npocc + world.size - 1) // world.size

        occsize = nstat * ngridpoints * 16. / 1024**2
        print('        Occupied states: %f M / cpu' % occsize, file=self.fd)

        print('        Memory usage before allocation: %f M / cpu' %
              (maxrss() / 1024**2),
              file=self.fd)

        print('', file=self.fd)
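The chi0_wGG figure printed above is simply the size of a complex128 array of shape (nw, ngmax, ngmax): nw * ngmax**2 * 16 bytes, reported in MiB. For example:

nw, ngmax = 100, 500
chisize = nw * ngmax**2 * 16. / 1024**2
print('chi0_wGG: %f M / cpu' % chisize)  # about 381.5 M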
Example #44
File: gaunt.py Project: yihsuanliu/gpaw
                for c1, n1 in YL[L1]:
                    for c2, n2 in YL[L2]:
                        n = [0, 0, 0]
                        n[0] = n1[0] + n2[0]
                        n[1] = n1[1] + n2[1]
                        n[2] = n1[2] + n2[2]
                        if n2[v] > 0:
                            # apply derivative
                            n[v] -= 1
                            # add integral
                            r += n2[v] * c1 * c2 * gam(n[0], n[1], n[2])
                Y_LLv[L1, L2, v] = r
    return Y_LLv

from gpaw import extra_parameters
if extra_parameters.get('fprojectors'):
    gaunt = make_gaunt(3)
    Y_LLv = make_nabla(3)

if __name__ == '__main__':
    # XXX
    # There are 9*9*25=2025 elements.
    # Of these, 162 are non-zero, and only 30 are distinct, i.e. only 1.5%.
    # This should be stored more efficiently. See e.g.
    #   "Efficiente storage scheme for precalculated wigner 3J, 6J and Gaunt
    #   coefficients", Rasch and Yu,
    #   Siam J. Sci. Comput., Vol 25, No. 4, pp. 1416-1428, 2003
    # XXX
    print('Constructing "Gaunt.py" ...')
    gaunt = make_gaunt(2)
    print('gaunt = np.array(%s)' % gaunt.tolist())
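The comment in the __main__ block notes that only a small fraction of the tabulated Gaunt coefficients are non-zero, so a sparse layout would shrink the table considerably. A sketch of the idea (not part of gpaw), keeping only the non-zero entries keyed by their (L1, L2, L3) indices:

import numpy as np

def sparsify(gaunt_LLL, tol=1e-12):
    # Return {(L1, L2, L3): value} for the non-negligible coefficients only.
    indices = np.argwhere(abs(gaunt_LLL) > tol)
    return {tuple(i): gaunt_LLL[tuple(i)] for i in indices}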
Example #45
    def initialize(self):

        self.printtxt('')
        self.printtxt('-----------------------------------------')
        self.printtxt('Response function calculation started at:')
        self.starttime = time()
        self.printtxt(ctime())

        BASECHI.initialize(self)

        # Frequency init
        self.dw = None
        if len(self.w_w) == 1:
            self.hilbert_trans = False

        if self.hilbert_trans:
            self.dw = self.w_w[1] - self.w_w[0]
            assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) <
                    1e-10).all()  # make sure it's a linear w grid
            assert self.w_w.max() == self.w_w[-1]

            self.dw /= Hartree
            self.w_w /= Hartree
            self.wmax = self.w_w[-1]
            self.wcut = self.wmax + 5. / Hartree
            self.Nw = int(self.wmax / self.dw) + 1
            self.NwS = int(self.wcut / self.dw) + 1
        else:
            self.Nw = len(self.w_w)
            self.NwS = 0
            if len(self.w_w) > 1:
                self.dw = self.w_w[1] - self.w_w[0]
                assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all()
                self.dw /= Hartree

        if self.hilbert_trans:
            # for band parallelization.
            for n in range(self.nbands):
                if (self.f_kn[:, n] - self.ftol < 0).all():
                    self.nvalbands = n
                    break
        else:
            # if not hilbert transform, all the bands should be used.
            self.nvalbands = self.nbands

        # Parallelization initialize
        self.parallel_init()

        # Printing calculation information
        self.print_chi()

        if extra_parameters.get('df_dry_run'):
            raise SystemExit

        calc = self.calc

        # For LCAO wfs
        if calc.input_parameters['mode'] == 'lcao':
            calc.initialize_positions()
        self.printtxt('     GS calculator   : %f M / cpu' %
                      (maxrss() / 1024**2))
        # PAW part init
        # calculate <phi_i | e**(-i(q+G).r) | phi_j>
        # G != 0 part
        self.get_phi_aGp()

        # Calculate ALDA kernel for EELS spectrum
        # Use RPA kernel for Optical spectrum and rpa correlation energy
        if not self.optical_limit and np.dtype(self.w_w[0]) == float:
            R_av = calc.atoms.positions / Bohr
            self.Kxc_GG = calculate_Kxc(
                self.gd,  # global grid
                calc.density.nt_sG,
                self.npw,
                self.Gvec_Gc,
                self.nG,
                self.vol,
                self.bcell_cv,
                R_av,
                calc.wfs.setups,
                calc.density.D_asp)

            self.printtxt('Finished ALDA kernel ! ')
        else:
            self.Kxc_GG = np.zeros((self.npw, self.npw))
            self.printtxt('Use RPA for optical spectrum ! ')
            self.printtxt('')

        return
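The assertions in the hilbert_trans branch require a uniformly spaced frequency grid before it is converted to Hartree. The same check in isolation:

import numpy as np

w_w = np.linspace(0.0, 10.0, 101)   # linear frequency grid (eV)
dw = w_w[1] - w_w[0]
assert ((w_w[1:] - w_w[:-1] - dw) < 1e-10).all()  # uniform spacing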