Example #1
    def __init__(self, SK=None, hdf_datafile=None):
        """
        Initialization of the class. There are two ways to do so:

        - existing SumkLDA class :  when you have an existing SumkLDA instance
        - from hdf5 archive :  when you want to use data from hdf5 archive

        Giving the class instance overrides giving the string for the hdf5 archive.

        Parameters
        ----------
        SK : class SumkLDA, optional
             Existing instance of SumkLDA class.
        hdf_datafile : string, optional
                       Name of hdf5 archive to be used.

        """

        if SK is None:
            # build our own SK instance
            if hdf_datafile is None:
                mpi.report("trans_basis: give SK instance or HDF filename!")
                return

            Converter = Wien2kConverter(filename=hdf_datafile, repacking=False)
            Converter.convert_dft_input()
            del Converter

            self.SK = SumkDFT(hdf_file=hdf_datafile + '.h5',
                              use_dft_blocks=False)
        else:
            self.SK = SK

        self.T = copy.deepcopy(self.SK.T[0])
        self.w = numpy.identity(self.SK.corr_shells[0]['dim'])
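
A minimal usage sketch for this constructor, assuming (as an assumption, not shown in the snippet) that it belongs to the TransBasis class of triqs_dft_tools and that a Wien2k-converted archive case.h5 exists:

# Hedged usage sketch; module/class names and 'case' are assumptions.
from triqs_dft_tools.trans_basis import TransBasis

TB = TransBasis(hdf_datafile='case')   # builds its own SumkDFT from 'case.h5'
# or reuse an existing SumkDFT/SumkLDA instance SK:
# TB = TransBasis(SK=SK)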
Example #2
File: bse.py Project: TRIQS/tprf
def get_chi0_nk_at_specific_w(g_wk, nw_index=1, nwf=None):
    r""" Compute the generalized bare lattice susceptibility 
    :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, i\nu_n, \mathbf{k})` from the single-particle
    Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})` for a specific :math:`i\omega_{n=\mathrm{nw\_index}}`.

    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    nw_index : int,
               The bosonic Matsubara frequency index :math:`i\omega_{n=\mathrm{nw\_index}}`
               at which :math:`\chi^0` is calculated.
    nwf : int,
          Number of fermionic frequencies in :math:`\chi^0`.    

    Returns
    -------

    chi0_nk : Gf,
               Generalized bare lattice susceptibility
               :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, i\nu_n, \mathbf{k})`.
    """

    fmesh = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    if nwf is None:
        nwf = len(fmesh) // 2

    mpi.barrier()
    mpi.report('g_wk ' + str(g_wk[Idx(2), Idx(0, 1, 2)][0, 0]))
    n = np.sum(g_wk.data) / len(kmesh)
    mpi.report('n ' + str(n))
    mpi.barrier()

    mpi.report('--> g_wr from g_wk')
    g_wr = fourier_wk_to_wr(g_wk)

    mpi.report('--> chi0_wnr from g_wr')
    chi0_nr = chi0_nr_from_gr_PH_at_specific_w(nw_index=nw_index,
                                               nn=nwf,
                                               g_nr=g_wr)
    del g_wr

    mpi.report('--> chi0_wnk from chi0_wnr')
    # Create a 'fake' bosonic mesh to be able to use 'chi0q_from_chi0r'
    chi0_wnr = add_fake_bosonic_mesh(chi0_nr)
    del chi0_nr

    chi0_wnk = chi0q_from_chi0r(chi0_wnr)
    del chi0_wnr

    chi0_nk = chi0_wnk[Idx(0), :, :]
    del chi0_wnk

    return chi0_nk
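
For orientation, the bosonic Matsubara frequency selected by nw_index follows the standard convention omega_n = 2*pi*n/beta; a tiny illustrative check (numbers are made up, not from the source):

# Illustrative only: the frequency picked out by nw_index for beta = 10.
import numpy as np
beta, nw_index = 10.0, 1
omega_n = 2.0 * np.pi * nw_index / beta   # ~0.6283 for these numbers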
Example #3
    def index_works(self, shells):
        """
        Return True if the index version of upfold/downfold can be used.
        If True, proj_index is set (necessary for running the index version).
        """
        if shells == 'corr':
            # (Re)make proj_index if proj_index has not been computed or if proj_mat has been updated
            if not hasattr(self, 'projmat_id') or self.projmat_id != id(
                    self.proj_mat):
                self.proj_index = self.make_proj_index()
                self.projmat_id = id(
                    self.proj_mat
                )  # Store ID(proj_mat) to skip making the index next time
                if self.proj_index is not None:
                    mpi.report(
                        "The fancy-index version of upfold/downfold is used.")
            projindex = self.proj_index
        elif shells == 'all':
            # TODO: make self.proj_index_all from self.proj_mat_all
            projindex = None

        return projindex is not None
Example #4
def solve_self_consistent_dmft(p):

    ps = []
    for dmft_iter in range(p.n_iter):
        mpi.report('--> DMFT Iteration: {:d}'.format(p.iter))
        p = dmft_self_consistent_step(p)
        ps.append(p)
        mpi.report('--> DMFT Convergence: dG_l = {:f}'.format(p.dG_l))
        if p.dG_l < p.G_l_tol: break

    if dmft_iter >= p.n_iter - 1:
        mpi.report('--> Warning: DMFT Not converged!')
    else:
        mpi.report('--> DMFT Converged: dG_l = {:f}'.format(p.dG_l))
    return ps
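
A hedged sketch of the ParameterCollection this driver expects; the field names n_iter, G_l_tol, iter and dG_l are read off the loop above, while dmft_self_consistent_step() is defined elsewhere in the script:

# Hedged sketch; parameter values are illustrative, not from the source.
from triqs_tprf.ParameterCollection import ParameterCollection

p = ParameterCollection(n_iter=10, G_l_tol=1e-5, iter=0, dG_l=float('inf'))
# ps = solve_self_consistent_dmft(p)   # ps[-1] then holds the last iteration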
Example #5
    def repack(self):
        """
        Calls the h5repack routine in order to reduce the file size of the hdf5 archive.

        Note
        ----
        Should only be used before the first invocation of HDFArchive in the program,
        otherwise the hdf5 linking will be broken.

        """

        import subprocess

        if not (mpi.is_master_node()):
            return
        mpi.report("Repacking the file %s" % self.hdf_file)

        retcode = subprocess.call(
            ["h5repack", "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
        if retcode != 0:
            mpi.report("h5repack failed!")
        else:
            subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.hdf_file])
Example #6
def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
    """
    Drive the charge self-consistent DFT+DMFT cycle: alternate VASP runs
    (synchronised via the vasp.lock file) with calls to dmft_cycle() until
    n_iter DMFT iterations have been performed.
    """
    mpi.report("  Waiting for VASP lock to appear...")
    while not is_vasp_lock_present():
        time.sleep(1)

    vasp_running = True

    iter = 0
    while vasp_running:
        if debug: print(bcolors.RED + "rank %s" % (mpi.rank) + bcolors.ENDC)
        mpi.report("  Waiting for VASP lock to disappear...")
        mpi.barrier()
        while is_vasp_lock_present():
            time.sleep(1)
            #            if debug: print bcolors.YELLOW + " waiting: rank %s"%(mpi.rank) + bcolors.ENDC
            if not is_vasp_running(vasp_pid):
                mpi.report("  VASP stopped")
                vasp_running = False
                break

        if debug:
            print(bcolors.MAGENTA + "rank %s" % (mpi.rank) + bcolors.ENDC)
        err = 0
        exc = None
        if debug:
            print(bcolors.BLUE + "plovasp: rank %s" % (mpi.rank) +
                  bcolors.ENDC)
        if mpi.is_master_node():
            converter.generate_and_output_as_text(cfg_file, vasp_dir='./')
            # Read energy from OSZICAR
            dft_energy = get_dft_energy()
        mpi.barrier()

        if debug: print(bcolors.GREEN + "rank %s" % (mpi.rank) + bcolors.ENDC)
        corr_energy, dft_dc = dmft_cycle()
        mpi.barrier()

        if mpi.is_master_node():
            total_energy = dft_energy + corr_energy - dft_dc
            print()
            print("=" * 80)
            print("  Total energy: ", total_energy)
            print("  DFT energy: ", dft_energy)
            print("  Corr. energy: ", corr_energy)
            print("  DFT DC: ", dft_dc)
            print("=" * 80)
            print()

        # check if we should do additional VASP calculations
        # in the standard VASP version, VASP writes out GAMMA itself
        # so that if we want to keep GAMMA fixed we have to copy it to
        # GAMMA_recent and copy it back after VASP has completed an iteration
        # if we are using a hacked Version of VASP where the write out is
        # disabled we can skip this step.
        # the hack consists of removing the call of LPRJ_LDApU in VASP src file
        # electron.F around line 644
        iter_dft = 0

        if vasp_version == 'standard':
            copyfile(src='GAMMA', dst='GAMMA_recent')
        while iter_dft < n_iter_dft:
            if mpi.is_master_node():
                open('./vasp.lock', 'a').close()
            while is_vasp_lock_present():
                time.sleep(1)
                if not is_vasp_running(vasp_pid):
                    mpi.report("  VASP stopped")
                    vasp_running = False
                    break
            iter_dft += 1
            if vasp_version == 'standard':
                copyfile(src='GAMMA_recent', dst='GAMMA')
        iter += 1
        # Tell VASP to stop if the maximum number of iterations is reached
        if iter == n_iter:
            print("\n  Maximum number of iterations reached.")
            print("  Aborting VASP iterations...\n")
            f_stop = open('STOPCAR', 'wt')
            f_stop.write("LABORT = .TRUE.\n")
            f_stop.close()
    if mpi.is_master_node():
        total_energy = dft_energy + corr_energy - dft_dc
        with open('TOTENERGY', 'w') as f:
            f.write("  Total energy: %s\n" % (total_energy))
            f.write("  DFT energy: %s\n" % (dft_energy))
            f.write("  Corr. energy: %s\n" % (corr_energy))
            f.write("  DFT DC: %s\n" % (dft_dc))
            f.write("  Energy correction: %s\n" % (corr_energy - dft_dc))

    mpi.report("***Done")
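
Hedged, minimal stand-ins for the two helpers run_all() relies on (the real implementations live in the csc_flow machinery of triqs_dft_tools; these sketches only illustrate the expected behaviour):

import os

def is_vasp_lock_present():
    # the lock file VASP and the DMFT side use to hand control back and forth
    return os.path.isfile('./vasp.lock')

def is_vasp_running(vasp_pid):
    # signal 0 does not kill the process, it only checks that the PID exists
    try:
        os.kill(vasp_pid, 0)
    except OSError:
        return False
    return True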
Example #7
    def convert_dft_input(self,
                          first_real_part_matrix=True,
                          only_upper_triangle=False,
                          weights_in_file=False):
        """
        Reads the appropriate files and stores the data for the dft_subgrp in the hdf5 archive. 

        Parameters
        ----------
        first_real_part_matrix : boolean, optional
                                 Should all the real components for given k be read in first, followed by the imaginary parts?
        only_upper_triangle : boolean, optional
                              Should only the upper triangular part of H(k) be read in? 
        weights_in_file : boolean, optional
                          Are the k-point weights to be read in?

        """

        # Read and write only on the master node
        if not (mpi.is_master_node()):
            return
        mpi.report("Reading input from %s..." % self.dft_file)

        # R is a generator : each R.Next() will return the next number in the
        # file
        R = ConverterTools.read_fortran_file(self, self.dft_file,
                                             self.fortran_to_replace)
        try:
            # the energy conversion factor is 1.0, we assume eV in files
            energy_unit = 1.0
            # read the number of k points
            n_k = int(next(R))
            k_dep_projection = 0
            SP = 0  # no spin-polarisation
            SO = 0  # no spin-orbit
            # total charge below energy window is set to 0
            charge_below = 0.0
            # density required, for setting the chemical potential
            density_required = next(R)
            symm_op = 0  # No symmetry groups for the k-sum

            # the information on the non-correlated shells is needed for
            # defining dimension of matrices:
            # number of shells considered in the Wanniers
            n_shells = int(next(R))
            # corresponds to index R in formulas
            # now read the information about the shells (atom, sort, l, dim):
            shell_entries = ['atom', 'sort', 'l', 'dim']
            shells = [{name: int(val)
                       for name, val in zip(shell_entries, R)}
                      for ish in range(n_shells)]

            # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
            n_corr_shells = int(next(R))
            # corresponds to index R in formulas
            # now read the information about the shells (atom, sort, l, dim, SO
            # flag, irep):
            corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
            corr_shells = [{
                name: int(val)
                for name, val in zip(corr_shell_entries, R)
            } for icrsh in range(n_corr_shells)]

            # determine the number of inequivalent correlated shells and maps,
            # needed for further reading
            [n_inequiv_shells, corr_to_inequiv, inequiv_to_corr
             ] = ConverterTools.det_shell_equivalence(self, corr_shells)

            use_rotations = 0
            rot_mat = [
                numpy.identity(corr_shells[icrsh]['dim'], numpy.complex_)
                for icrsh in range(n_corr_shells)
            ]
            rot_mat_time_inv = [0 for i in range(n_corr_shells)]

            # Representative representations are read from file
            n_reps = [1 for i in range(n_inequiv_shells)]
            dim_reps = [0 for i in range(n_inequiv_shells)]
            T = []
            for ish in range(n_inequiv_shells):
                # number of representatives ("subsets"), e.g. t2g and eg
                n_reps[ish] = int(next(R))
                dim_reps[ish] = [int(next(R)) for i in range(n_reps[ish])
                                 ]  # dimensions of the subsets

                # The transformation matrix:
                # is of dimension 2l+1, it is taken to be standard d (as in
                # Wien2k)
                ll = 2 * corr_shells[inequiv_to_corr[ish]]['l'] + 1
                lmax = ll * (corr_shells[inequiv_to_corr[ish]]['SO'] + 1)
                T.append(numpy.zeros([lmax, lmax], numpy.complex_))

                T[ish] = numpy.array(
                    [[0.0, 0.0, 1.0, 0.0, 0.0],
                     [1.0 / sqrt(2.0), 0.0, 0.0, 0.0, 1.0 / sqrt(2.0)],
                     [-1.0 / sqrt(2.0), 0.0, 0.0, 0.0, 1.0 / sqrt(2.0)],
                     [0.0, 1.0 / sqrt(2.0), 0.0, -1.0 / sqrt(2.0), 0.0],
                     [0.0, 1.0 / sqrt(2.0), 0.0, 1.0 / sqrt(2.0), 0.0]])

            # Spin blocks to be read:
            # number of spins to read for Norbs and Ham, NOT Projectors
            n_spin_blocs = SP + 1 - SO

            # define the number of n_orbitals for all k points: it is the
            # number of total bands and independent of k!
            n_orbitals = numpy.ones([n_k, n_spin_blocs], numpy.int) * sum(
                [sh['dim'] for sh in shells])

            # Initialise the projectors:
            proj_mat = numpy.zeros([
                n_k, n_spin_blocs, n_corr_shells,
                max([crsh['dim'] for crsh in corr_shells]),
                numpy.max(n_orbitals)
            ], numpy.complex_)

            # Read the projectors from the file:
            for ik in range(n_k):
                for icrsh in range(n_corr_shells):
                    for isp in range(n_spin_blocs):

                        # calculate the offset:
                        offset = 0
                        n_orb = 0
                        for ish in range(n_shells):
                            if (n_orb == 0):
                                if (shells[ish]['atom']
                                        == corr_shells[icrsh]['atom']) and (
                                            shells[ish]['sort']
                                            == corr_shells[icrsh]['sort']):
                                    n_orb = corr_shells[icrsh]['dim']
                                else:
                                    offset += shells[ish]['dim']

                        proj_mat[ik, isp, icrsh, 0:n_orb,
                                 offset:offset + n_orb] = numpy.identity(n_orb)

            # now define the arrays for weights and hopping ...
            # w(k_index),  default normalisation
            bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k)
            hopping = numpy.zeros([
                n_k, n_spin_blocs,
                numpy.max(n_orbitals),
                numpy.max(n_orbitals)
            ], numpy.complex_)

            if (weights_in_file):
                # weights in the file
                for ik in range(n_k):
                    bz_weights[ik] = next(R)

            # if the sum over spins is in the weights, take it out again!!
            sm = sum(bz_weights)
            bz_weights[:] /= sm

            # Grab the H
            for isp in range(n_spin_blocs):
                for ik in range(n_k):
                    n_orb = n_orbitals[ik, isp]

                    # first read all real components for given k, then read
                    # imaginary parts
                    if (first_real_part_matrix):

                        for i in range(n_orb):
                            if (only_upper_triangle):
                                istart = i
                            else:
                                istart = 0
                            for j in range(istart, n_orb):
                                hopping[ik, isp, i, j] = next(R)

                        for i in range(n_orb):
                            if (only_upper_triangle):
                                istart = i
                            else:
                                istart = 0
                            for j in range(istart, n_orb):
                                hopping[ik, isp, i, j] += next(R) * 1j
                                if ((only_upper_triangle) and (i != j)):
                                    hopping[ik, isp, j,
                                            i] = hopping[ik, isp, i,
                                                         j].conjugate()

                    else:  # read (real,im) tuple

                        for i in range(n_orb):
                            if (only_upper_triangle):
                                istart = i
                            else:
                                istart = 0
                            for j in range(istart, n_orb):
                                hopping[ik, isp, i, j] = next(R)
                                hopping[ik, isp, i, j] += next(R) * 1j

                                if ((only_upper_triangle) and (i != j)):
                                    hopping[ik, isp, j,
                                            i] = hopping[ik, isp, i,
                                                         j].conjugate()
            # keep some things that we need for reading parproj:
            things_to_set = [
                'n_shells', 'shells', 'n_corr_shells', 'corr_shells',
                'n_spin_blocs', 'n_orbitals', 'n_k', 'SO', 'SP', 'energy_unit'
            ]
            for it in things_to_set:
                setattr(self, it, locals()[it])
        except StopIteration:  # a more explicit error if the file is corrupted.
            raise IOError("HK Converter : reading file %s failed!" % self.dft_file)

        R.close()

        # Save to the HDF5:
        with HDFArchive(self.hdf_file, 'a') as ar:
            if not (self.dft_subgrp in ar):
                ar.create_group(self.dft_subgrp)
            things_to_save = [
                'energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO',
                'charge_below', 'density_required', 'symm_op', 'n_shells',
                'shells', 'n_corr_shells', 'corr_shells', 'use_rotations',
                'rot_mat', 'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T',
                'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
                'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr'
            ]
            for it in things_to_save:
                ar[self.dft_subgrp][it] = locals()[it]
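
A hedged usage sketch for this converter method; the import path below matches recent triqs_dft_tools releases (older releases use a slightly different module name), and 'hamilt.hk' stands in for an existing Hamiltonian file:

# Hedged sketch; the file name and exact import path are assumptions.
from triqs_dft_tools.converters.hk import HkConverter

Converter = HkConverter(filename='hamilt.hk')
Converter.convert_dft_input(first_real_part_matrix=True,
                            only_upper_triangle=False,
                            weights_in_file=False)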
Example #8
    S.Sigma_iw << mpi.bcast(S.Sigma_iw)
    chemical_potential = mpi.bcast(chemical_potential)
    dc_imp = mpi.bcast(dc_imp)
    dc_energ = mpi.bcast(dc_energ)
    SK.set_mu(chemical_potential)
    SK.set_dc(dc_imp, dc_energ)

for iteration_number in range(1, loops + 1):
    if mpi.is_master_node(): print("Iteration = ", iteration_number)

    SK.symm_deg_gf(S.Sigma_iw, orb=0)  # symmetrise Sigma
    SK.set_Sigma([S.Sigma_iw])  # set Sigma into the SumK class
    chemical_potential = SK.calc_mu(
        precision=prec_mu)  # find the chemical potential for given density
    S.G_iw << SK.extract_G_loc()[0]  # calc the local Green function
    mpi.report("Total charge of Gloc : %.6f" % S.G_iw.total_density())

    # Init the DC term and the real part of Sigma, if no previous runs found:
    if (iteration_number == 1 and not previous_present):
        dm = S.G_iw.density()
        SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=dc_type)
        S.Sigma_iw << SK.dc_imp[0]['up'][0, 0]

    # Calculate new G0_iw to input into the solver:
    S.G0_iw << inverse(S.Sigma_iw + inverse(S.G_iw))

    # Solve the impurity problem:
    S.solve(h_int=h_int, **p)

    # Solved. Now do post-processing:
    mpi.report("Total charge of impurity problem : %.6f" %
               S.G_iw.total_density())
Example #9
def dichotomy(function, x_init, y_value, precision_on_y, delta_x,
              max_loops = 1000, x_name="", y_name="", verbosity=1):
    
    r""" Finds :math:`x` that solves :math:`y = f(x)`.
    
    Starting at ``x_init``, which is used as one bound, dichotomy first finds the
    other bound by repeatedly adding/subtracting ``delta_x``. The root is then
    refined by linear interpolation (false position) between the bounds until
    ``abs(f(x) - y_value) < precision_on_y`` or ``max_loops`` is reached.
   
    Parameters
    ----------

    function : function, real valued 
        Function :math:`f(x)`. It must take only one real parameter.
    x_init : double
        Initial guess for x; also used as the first bound of the search.
    y_value : double
        Target value for y.
    precision_on_y : double
        Stops if ``abs(f(x) - y_value) < precision_on_y``.
    delta_x : double
        :math:`\Delta x` added/subtracted from ``x_init`` until the second bound is found.
    max_loops : integer, optional
        Maximum number of loops (default is 1000).
    x_name : string, optional
        Name of variable x used for printing.
    y_name : string, optional
        Name of variable y used for printing.
    verbosity : integer, optional
        Verbosity level. 

    Returns
    -------

    (x,y) : (double, double)
        :math:`x` and :math:`y=f(x)`. Returns (None, None) if dichotomy failed.
    """
    
    mpi.report("Dichotomy adjustment of %(x_name)s to obtain %(y_name)s = %(y_value)f +/- %(precision_on_y)f"%locals() )
    PR = "    "
    if x_name == "" or y_name == ""  : verbosity = max(verbosity,1)
    x=x_init;delta_x= abs(delta_x)

    # First find the bounds
    y1 = function(x)
    eps = np.sign(y1-y_value)
    x1=x;y2=y1;x2=x1
    nbre_loop=0
    while (nbre_loop<= max_loops) and (y2-y_value)*eps>0 and abs(y2-y_value)>precision_on_y :
        nbre_loop +=1
        x2 -=  eps*delta_x
        y2 = function(x2)
        if x_name!="" and verbosity>2:
            mpi.report("%(PR)s%(x_name)s = %(x2)f  \n%(PR)s%(y_name)s = %(y2)f"%locals())
    
    # Make sure that x2 > x1
    if x1 > x2:
        x1,x2 = x2,x1
        y1,y2 = y2,y1
    
    mpi.report("%(PR)s%(x1)f < %(x_name)s < %(x2)f"%locals())
    mpi.report("%(PR)s%(y1)f < %(y_name)s < %(y2)f"%locals())

    # We found bounds.
    # If one of the two bounds is already close to the solution
    # the bisection will not run. For this case we set x and yfound.
    if abs(y1-y_value) < abs(y2-y_value) :
        yfound = y1
        x = x1
    else:
        yfound = y2
        x = x2
        
    #Now let's refine between the bounds
    while (nbre_loop<= max_loops) and (abs(yfound-y_value)>precision_on_y) :
        nbre_loop +=1
        x = x1  + (x2 - x1) * (y_value - y1)/(y2-y1)
        yfound = function(x)
        if (y1-y_value)*(yfound - y_value)>0 : 
            x1 = x; y1=yfound
        else :
            x2= x;y2=yfound;
        if verbosity > 2:
            mpi.report("%(PR)s%(x1)f < %(x_name)s < %(x2)f"%locals())
            mpi.report("%(PR)s%(y1)f < %(y_name)s < %(y2)f"%locals())
    if abs(yfound - y_value) < precision_on_y :
        if verbosity>0:
            mpi.report("%(PR)s%(x_name)s found in %(nbre_loop)d iterations : "%locals())
            mpi.report("%(PR)s%(y_name)s = %(yfound)f;%(x_name)s = %(x)f"%locals())
        return (x,yfound)
    else : 
        if verbosity > 0:
            mpi.report("%(PR)sFAILURE to adjust %(x_name)s to the value %(y_value)f after %(nbre_loop)d iterations."%locals())
            mpi.report("%(PR)sReturning (None, None)."%locals())
        return (None,None)
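
A minimal usage sketch: adjust x until x**2 reaches 2.0 (the defining module already imports numpy as np and triqs.utility.mpi as mpi, which dichotomy uses internally; the target value here is purely illustrative):

x, y = dichotomy(function=lambda x: x * x, x_init=1.0, y_value=2.0,
                 precision_on_y=1e-8, delta_x=0.5,
                 x_name="x", y_name="x^2", verbosity=3)
# on success x is close to sqrt(2) ~ 1.4142; on failure (None, None) is returned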
Example #10
def report(message):
    if is_mpi_loaded():
        import triqs.utility.mpi as mpi
        return mpi.report(message)
    else:
        print(message)
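
A hedged stand-in for the is_mpi_loaded() helper used above (the real implementation lives elsewhere in the package; this sketch only checks whether the TRIQS mpi module has already been imported):

import sys

def is_mpi_loaded():
    return 'triqs.utility.mpi' in sys.modules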
Example #11
def gw_sigma_wk(Wr_wk, g_wk, fft_flag=False):
    r""" GW self energy :math:`\Sigma(i\omega_n, \mathbf{k})` calculator

    Fourier transforms the screened interaction and the single-particle
    Green's function to imaginary time and real space.

    .. math::
        G_{ab}(\tau, \mathbf{r}) = \mathcal{F}^{-1}
          \left\{ G_{ab}(i\omega_n, \mathbf{k}) \right\}

    .. math::
        W^{(r)}_{abcd}(\tau, \mathbf{r}) = \mathcal{F}^{-1}
          \left\{ W^{(r)}_{abcd}(i\omega_n, \mathbf{k}) \right\}

    computes the GW self-energy as the product

    .. math::
        \Sigma_{ab}(\tau, \mathbf{r}) =
          \sum_{cd} W^{(r)}_{abcd}(\tau, \mathbf{r}) G_{cd}(\tau, \mathbf{r})

    and transforms back to frequency and momentum

    .. math::
        \Sigma_{ab}(i\omega_n, \mathbf{k}) =
          \mathcal{F} \left\{ \Sigma_{ab}(\tau, \mathbf{r}) \right\}

    Parameters
    ----------

    Wr_wk : TRIQS Green's function (rank 4) on Matsubara and Brillouin zone meshes
            retarded screened interaction :math:`W^{(r)}_{abcd}(i\omega_n, \mathbf{k})`

    g_wk : TRIQS Green's function (rank 2) on Matsubara and Brillouin zone meshes
           single-particle Green's function :math:`G_{ab}(i\nu_n, \mathbf{k})`

    fft_flag : bool, optional
               If True, evaluate the product in imaginary time and real space using
               fast Fourier transforms; otherwise the serial C++ kernel
               (cpp_gw_sigma_wk_serial_fft) is used.

    Returns
    -------

    sigma_wk : TRIQS Green's function (rank 2) on Matsubara and Brillouin zone meshes
               GW self-energy :math:`\Sigma_{ab}(i\omega_n, \mathbf{k})`
    """

    if fft_flag:

        nw = len(g_wk.mesh.components[0]) // 2
        ntau = nw * 6 + 1

        mpi.report('g wk -> wr')
        g_wr = fourier_wk_to_wr(g_wk)
        mpi.report('g wr -> tr')
        g_tr = fourier_wr_to_tr(g_wr, nt=ntau)
        del g_wr

        mpi.report('W wk -> wr')
        Wr_wr = chi_wr_from_chi_wk(Wr_wk)
        mpi.report('W wr -> tr')
        Wr_tr = chi_tr_from_chi_wr(Wr_wr, ntau=ntau)
        del Wr_wr

        mpi.report('sigma tr')
        sigma_tr = cpp_gw_sigma_tr(Wr_tr, g_tr)
        del Wr_tr
        del g_tr

        mpi.report('sigma tr -> wr')
        sigma_wr = fourier_tr_to_wr(sigma_tr, nw=nw)

        del sigma_tr
        mpi.report('sigma wr -> wk')
        sigma_wk = fourier_wr_to_wk(sigma_wr)
        del sigma_wr

    else:
        sigma_wk = cpp_gw_sigma_wk_serial_fft(Wr_wk, g_wk)

    return sigma_wk
Example #12
    def calculate_diagonalisation_matrix(self,
                                         prop_to_be_diagonal='eal',
                                         calc_in_solver_blocks=False):
        """
        Calculates the diagonalisation matrix w, and stores it as member of the class.

        Parameters
        ----------
        prop_to_be_diagonal : string, optional
                              Defines the property to be diagonalized.

                              - 'eal' : local hamiltonian (i.e. crystal field)
                              - 'dm' : local density matrix

        calc_in_solver_blocks : bool, optional
                                Whether the property shall be diagonalized in the
                                full sumk structure, or just in the solver structure.

        Returns
        -------
        wsqr : double
               Measure for the degree of rotation done by the diagonalisation. wsqr=1 means no rotation.

        """

        if prop_to_be_diagonal == 'eal':
            prop = self.SK.eff_atomic_levels()[0]
        elif prop_to_be_diagonal == 'dm':
            prop = self.SK.density_matrix(method='using_point_integration')[0]
        else:
            mpi.report(
                "trans_basis: not a valid quantity to diagonalize. Choices are 'eal' or 'dm'."
            )
            return 0

        if calc_in_solver_blocks:
            trafo = self.SK.block_structure.transformation
            self.SK.block_structure.transformation = None

            prop_solver = self.SK.block_structure.convert_matrix(
                prop, space_from='sumk', space_to='solver')
            v = {}
            for name in prop_solver:
                v[name] = numpy.linalg.eigh(prop_solver[name])[1]
            self.w = self.SK.block_structure.convert_matrix(
                v, space_from='solver',
                space_to='sumk')['ud' if self.SK.SO else 'up']
            self.T = numpy.dot(self.T.transpose().conjugate(),
                               self.w).conjugate().transpose()
            self.SK.block_structure.transformation = trafo
        else:
            block = 'ud' if self.SK.SO else 'up'
            self.eig, self.w = numpy.linalg.eigh(prop[block])
            # calculate new Transformation matrix
            self.T = numpy.dot(self.T.transpose().conjugate(),
                               self.w).conjugate().transpose()

        # measure for the 'unity' of the transformation:
        wsqr = sum(abs(self.w.diagonal())**2) / self.w.diagonal().size
        return wsqr
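
A hedged usage sketch, continuing the TransBasis-style object TB assumed in the sketch after Example #1 (mpi here is triqs.utility.mpi, as in the other examples):

wsqr = TB.calculate_diagonalisation_matrix(prop_to_be_diagonal='eal')
mpi.report('wsqr = %.6f (1.0 means no rotation)' % wsqr)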
Example #13
File: bse.py Project: TRIQS/tprf
def solve_lattice_bse_at_specific_w(g_wk, gamma_wnn, nw_index):
    r""" Compute the generalized lattice susceptibility 
    :math:`\chi_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, \mathbf{k})` using the Bethe-Salpeter 
    equation (BSE) for a specific :math:`i\omega_{n=\mathrm{nw\_index}}`.


    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    gamma_wnn : Gf,
                Local particle-hole vertex function 
                :math:`\Gamma_{a\bar{b}c\bar{d}}(i\omega_n, i\nu_n, i\nu_n')`.
    nw_index : int,
               The bosonic Matsubara frequency index :math:`i\omega_{n=\mathrm{nw\_index}}`
               at which the BSE is solved.

    Returns
    -------
    chi_k : Gf,
            Generalized lattice susceptibility 
            :math:`\chi_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, \mathbf{k})`.

    chi0_k : Gf,
             Generalized bare lattice susceptibility 
             :math:`\chi^0_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, \mathbf{k})`.
    """

    # Only use \Gamma at the specific \omega
    gamma_nn = gamma_wnn[Idx(nw_index), :, :]
    # Keep fake bosonic mesh for usability with other functions
    gamma_wnn = add_fake_bosonic_mesh(gamma_nn)

    fmesh_g = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    bmesh = gamma_wnn.mesh.components[0]
    fmesh = gamma_wnn.mesh.components[1]

    nk = len(kmesh)
    nwf = len(fmesh) // 2
    nwf_g = len(fmesh_g) // 2

    if mpi.is_master_node():
        print(tprf_banner(), "\n")
        print(
            'Lattice BSE with local vertex approximation at specific \omega.\n'
        )
        print('nk    =', nk)
        print('nw_index    =', nw_index)
        print('nwf   =', nwf)
        print('nwf_g =', nwf_g)
        print()

    mpi.report('--> chi0_wk_tail_corr')
    # Calculate chi0_wk up to the specific \omega
    chi0_wk_tail_corr = imtime_bubble_chi0_wk(g_wk,
                                              nw=np.abs(nw_index) + 1,
                                              save_memory=True)
    # Only use specific \omega, but put back on fake bosonic mesh
    chi0_k_tail_corr = chi0_wk_tail_corr[Idx(nw_index), :]
    chi0_wk_tail_corr = add_fake_bosonic_mesh(chi0_k_tail_corr,
                                              beta=bmesh.beta)

    chi0_nk = get_chi0_nk_at_specific_w(g_wk, nw_index=nw_index, nwf=nwf)
    # Keep fake bosonic mesh for usability with other functions
    chi0_wnk = add_fake_bosonic_mesh(chi0_nk)

    mpi.report('--> trace chi0_wnk')
    chi0_wk = chi0q_sum_nu(chi0_wnk)

    dchi_wk = chi0_wk_tail_corr - chi0_wk

    chi0_kw = Gf(mesh=MeshProduct(kmesh, bmesh),
                 target_shape=chi0_wk_tail_corr.target_shape)
    chi0_kw.data[:] = chi0_wk_tail_corr.data.swapaxes(0, 1)

    del chi0_wk
    del chi0_wk_tail_corr

    assert (chi0_wnk.mesh.components[0] == bmesh)
    assert (chi0_wnk.mesh.components[1] == fmesh)
    assert (chi0_wnk.mesh.components[2] == kmesh)

    # -- Lattice BSE calc with built in trace
    mpi.report('--> chi_kw from BSE')
    #mpi.report('DEBUG BSE INACTIVE'*72)
    chi_kw = chiq_sum_nu_from_chi0q_and_gamma_PH(chi0_wnk, gamma_wnn)
    #chi_kw = chi0_kw.copy()

    mpi.barrier()
    mpi.report('--> chi_kw from BSE (done)')

    del chi0_wnk

    mpi.report('--> chi_kw tail corrected (using chi0_wnk)')
    for k in kmesh:
        # -- account for high freq of chi_0 (better than nothing)
        chi_kw[k, :] += dchi_wk[:, k]

    del dchi_wk

    mpi.report('--> solve_lattice_bse, done.')

    chi_k = chi_kw[:, Idx(0)]
    del chi_kw

    chi0_k = chi0_kw[:, Idx(0)]
    del chi0_kw

    return chi_k, chi0_k
Example #14
dup = delta_in['up_0'].copy()
ddn = delta_in['down_0'].copy()
Delta = BlockGf(name_list=('up', 'dn'),
                block_list=(dup, ddn),
                make_copies=False)

# set off-diagonal elements of the hybridization to zero
if hyb_diag:
    for block, hyb_blocks in Delta:
        shp = hyb_blocks.target_shape
        for i_orb in range(0, shp[0]):
            for j_orb in range(0, shp[1]):
                if i_orb != j_orb:
                    hyb_blocks[i_orb, j_orb] << 0.0 + 0.0j

mpi.report('\ndiscretizing bath...')

### create bath object
bath = DiscretizeBath(Delta=Delta, NBath=Nb, gap=bath_gap)

# discretize bath
DeltaDiscrete = bath.reconstructDelta(w=ddn.mesh, eta=eta)

### setup Hloc_0
#give the local Hamiltonian the right block structure
Hloc_0 = ftps.solver_core.Hloc(MakeGFstruct(Delta))

if dc:
    mpi.report('impurity occupation for DC determination: ' +
               "{:1.4f}".format(dc_N))
    mpi.report('calculating DC:')
Example #15
File: bse.py Project: TRIQS/tprf
def solve_lattice_bse(g_wk, gamma_wnn):
    r""" Compute the generalized lattice susceptibility 
    :math:`\chi_{\bar{a}b\bar{c}d}(\mathbf{k}, i\omega_n)` using the Bethe-Salpeter 
    equation (BSE).

    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    gamma_wnn : Gf,
                Local particle-hole vertex function 
                :math:`\Gamma_{a\bar{b}c\bar{d}}(i\omega_n, i\nu_n, i\nu_n')`.

    Returns
    -------
    chi_kw : Gf,
             Generalized lattice susceptibility 
             :math:`\chi_{\bar{a}b\bar{c}d}(\mathbf{k}, i\omega_n)`.

    chi0_kw : Gf,
              Generalized bare lattice susceptibility 
              :math:`\chi^0_{\bar{a}b\bar{c}d}(\mathbf{k}, i\omega_n)`.
    """

    fmesh_g = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    bmesh = gamma_wnn.mesh.components[0]
    fmesh = gamma_wnn.mesh.components[1]

    nk = len(kmesh)
    nw = (len(bmesh) + 1) // 2
    nwf = len(fmesh) // 2
    nwf_g = len(fmesh_g) // 2

    if mpi.is_master_node():
        print(tprf_banner(), "\n")
        print('Lattice BSE with local vertex approximation.\n')
        print('nk    =', nk)
        print('nw    =', nw)
        print('nwf   =', nwf)
        print('nwf_g =', nwf_g)
        print()

    mpi.report('--> chi0_wk_tail_corr')
    chi0_wk_tail_corr = imtime_bubble_chi0_wk(g_wk, nw=nw)

    mpi.barrier()
    mpi.report('B1 ' +
               str(chi0_wk_tail_corr[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    chi0_wnk = get_chi0_wnk(g_wk, nw=nw, nwf=nwf)

    mpi.barrier()
    mpi.report('C ' + str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    mpi.report('--> trace chi0_wnk')
    chi0_wk = chi0q_sum_nu(chi0_wnk)

    mpi.barrier()
    mpi.report('D ' + str(chi0_wk[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    dchi_wk = chi0_wk_tail_corr - chi0_wk

    chi0_kw = Gf(mesh=MeshProduct(kmesh, bmesh),
                 target_shape=chi0_wk_tail_corr.target_shape)
    chi0_kw.data[:] = chi0_wk_tail_corr.data.swapaxes(0, 1)

    del chi0_wk
    del chi0_wk_tail_corr

    assert (chi0_wnk.mesh.components[0] == bmesh)
    assert (chi0_wnk.mesh.components[1] == fmesh)
    assert (chi0_wnk.mesh.components[2] == kmesh)

    # -- Lattice BSE calc with built in trace
    mpi.report('--> chi_kw from BSE')
    #mpi.report('DEBUG BSE INACTIVE'*72)
    chi_kw = chiq_sum_nu_from_chi0q_and_gamma_PH(chi0_wnk, gamma_wnn)
    #chi_kw = chi0_kw.copy()

    mpi.barrier()
    mpi.report('--> chi_kw from BSE (done)')

    del chi0_wnk

    mpi.report('--> chi_kw tail corrected (using chi0_wnk)')
    for k in kmesh:
        # -- account for high freq of chi_0 (better than nothing)
        chi_kw[k, :] += dchi_wk[:, k]

    del dchi_wk

    mpi.report('--> solve_lattice_bse, done.')

    return chi_kw, chi0_kw
Example #16
def imtime_bubble_chi0_wk(g_wk, nw=1, save_memory=False):
    ncores = multiprocessing.cpu_count()

    wmesh, kmesh = g_wk.mesh.components

    norb = g_wk.target_shape[0]
    beta = wmesh.beta
    nw_g = len(wmesh)
    nk = len(kmesh)

    ntau = 2 * nw_g

    # -- Memory Approximation

    ng_tr = ntau * np.prod(nk) * norb**2  # storing G(tau, r)
    ng_wr = nw_g * np.prod(nk) * norb**2  # storing G(w, r)
    ng_t = ntau * norb**2  # storing G(tau)

    nchi_tr = ntau * np.prod(nk) * norb**4  # storing \chi(tau, r)
    nchi_wr = nw * np.prod(nk) * norb**4  # storing \chi(w, r)
    nchi_t = ntau * norb**4  # storing \chi(tau)
    nchi_w = nw * norb**4  # storing \chi(w)
    nchi_r = np.prod(nk) * norb**4  # storing \chi(r)

    if nw == 1:
        ntot_case_1 = ng_tr + ng_wr
        ntot_case_2 = ng_tr + nchi_wr + ncores * (nchi_t + 2 * ng_t)
        ntot_case_3 = 4 * nchi_wr

        ntot = max(ntot_case_1, ntot_case_2, ntot_case_3)

    else:
        ntot_case_1 = ng_tr + nchi_tr + ncores * (nchi_t + 2 * ng_t)
        ntot_case_2 = nchi_tr + nchi_wr + ncores * (nchi_w + nchi_t)

        ntot = max(ntot_case_1, ntot_case_2)

    nbytes = ntot * np.complex128().nbytes
    ngb = nbytes / 1024.**3

    if mpi.is_master_node():
        print(tprf_banner(), "\n")
        print('beta  =', beta)
        print('nk    =', nk)
        print('nw    =', nw_g)
        print('norb  =', norb)
        print()
        print('Approx. Memory Utilization: %2.2f GB\n' % ngb)

    mpi.report('--> fourier_wk_to_wr')
    g_wr = fourier_wk_to_wr(g_wk)
    del g_wk

    mpi.report('--> fourier_wr_to_tr')
    g_tr = fourier_wr_to_tr(g_wr)
    del g_wr

    if nw == 1:
        mpi.report('--> chi0_w0r_from_grt_PH (bubble in tau & r)')
        chi0_wr = chi0_w0r_from_grt_PH(g_tr)
        del g_tr
    else:
        if not save_memory:
            mpi.report('--> chi0_tr_from_grt_PH (bubble in tau & r)')
            chi0_tr = chi0_tr_from_grt_PH(g_tr)
            del g_tr

            mpi.report('--> chi_wr_from_chi_tr')
            chi0_wr = chi_wr_from_chi_tr(chi0_tr, nw=nw)
            del chi0_tr
        else:
            chi0_wr = chi0_wr_from_grt_PH(g_tr, nw=nw)

    mpi.report('--> chi_wk_from_chi_wr (r->k)')
    chi0_wk = chi_wk_from_chi_wr(chi0_wr)
    del chi0_wr

    return chi0_wk
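
A back-of-the-envelope illustration of the memory estimate above; the numbers are purely illustrative and not from the source:

# Illustrative only: 16x16x16 k-mesh, norb = 2, nw_g = 1024 -> ntau = 2048.
import numpy as np
ntau, nk, norb = 2048, 16**3, 2
ng_tr = ntau * nk * norb**2      # 33_554_432 complex numbers for G(tau, r)
nchi_tr = ntau * nk * norb**4    # 134_217_728 complex numbers for chi(tau, r)
print((ng_tr + nchi_tr) * np.complex128().nbytes / 1024.**3)  # ~2.5 GB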
Example #17
File: bse.py Project: TRIQS/tprf
def get_chi0_wnk(g_wk, nw=1, nwf=None):
    r""" Compute the generalized bare lattice susceptibility 
    :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_n, i\nu_n, \mathbf{k})` from the single-particle
    Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.

    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    nw : int,
         Number of bosonic frequencies in :math:`\chi^0`.
    nwf : int,
          Number of fermionic frequencies in :math:`\chi^0`.    

    Returns
    -------

    chi0_wnk : Gf,
               Generalized bare lattice susceptibility
               :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_n, i\nu_n, \mathbf{k})`.
    """

    fmesh = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    if nwf is None:
        nwf = len(fmesh) // 2

    mpi.barrier()
    mpi.report('g_wk ' + str(g_wk[Idx(2), Idx(0, 0, 0)][0, 0]))
    n = np.sum(g_wk.data) / len(kmesh)
    mpi.report('n ' + str(n))
    mpi.barrier()

    mpi.report('--> g_wr from g_wk')
    g_wr = fourier_wk_to_wr(g_wk)

    mpi.barrier()
    mpi.report('g_wr ' + str(g_wr[Idx(2), Idx(0, 0, 0)][0, 0]))
    n_r = np.sum(g_wr.data, axis=0)[0]
    mpi.report('n_r=0 ' + str(n_r[0, 0]))
    mpi.barrier()

    mpi.report('--> chi0_wnr from g_wr')
    chi0_wnr = chi0r_from_gr_PH(nw=nw, nn=nwf, g_nr=g_wr)

    #mpi.report('--> chi0_wnr from g_wr (nompi)')
    #chi0_wnr_nompi = chi0r_from_gr_PH_nompi(nw=nw, nn=nwf, g_wr=g_wr)

    del g_wr

    #abs_diff = np.abs(chi0_wnr.data - chi0_wnr_nompi.data)
    #mpi.report('shape = ' + str(abs_diff.shape))
    #idx = np.argmax(abs_diff)
    #mpi.report('argmax = ' + str(idx))
    #diff = np.max(abs_diff)
    #mpi.report('diff = %6.6f' % diff)
    #del chi0_wnr
    #chi0_wnr = chi0_wnr_nompi

    #exit()

    mpi.barrier()
    mpi.report('chi0_wnr ' +
               str(chi0_wnr[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    chi0_r0 = np.sum(chi0_wnr[:, :, Idx(0, 0, 0)].data)
    mpi.report('chi0_r0 ' + str(chi0_r0))
    mpi.barrier()

    mpi.report('--> chi0_wnk from chi0_wnr')
    chi0_wnk = chi0q_from_chi0r(chi0_wnr)

    del chi0_wnr

    mpi.barrier()
    mpi.report('chi0_wnk ' +
               str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    chi0 = np.sum(chi0_wnk.data) / len(kmesh)
    mpi.report('chi0 = ' + str(chi0))
    mpi.barrier()

    #if mpi.is_master_node():
    if False:
        from triqs_tprf.ParameterCollection import ParameterCollection
        p = ParameterCollection()
        p.g_wk = g_wk
        p.g_wr = g_wr
        p.chi0_wnr = chi0_wnr
        p.chi0_wnk = chi0_wnk

        print('--> Writing debug info for BSE')
        with HDFArchive('data_debug_bse.h5', 'w') as arch:
            arch['p'] = p

    mpi.barrier()

    return chi0_wnk
Example #18
warnings.filterwarnings("ignore", category=FutureWarning)

filename = 'nio'

SK = SumkDFT(hdf_file=filename + '.h5', use_dft_blocks=False)

beta = 5.0

Sigma = SK.block_structure.create_gf(beta=beta)
SK.put_Sigma([Sigma])
G = SK.extract_G_loc()
SK.analyse_block_structure_from_gf(G, threshold=1e-3)
for i_sh in range(len(SK.deg_shells)):
    num_block_deg_orbs = len(SK.deg_shells[i_sh])
    mpi.report(
        'found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(
            num_block_deg_orbs, i_sh))
    for iblock in range(num_block_deg_orbs):
        mpi.report('block {0:d} consists of orbitals:'.format(iblock))
        for keys in list(SK.deg_shells[i_sh][iblock].keys()):
            mpi.report('  ' + keys)

# Setup CTQMC Solver

n_orb = SK.corr_shells[0]['dim']
spin_names = ['up', 'down']
orb_names = [i for i in range(0, n_orb)]

#gf_struct = set_operator_structure(spin_names, orb_names, orb_hyb)
gf_struct = SK.gf_struct_solver[0]
mpi.report('Sumk to Solver: %s' % SK.sumk_to_solver)
Example #19
p["fit_max_w"] = 8.0

# double counting correction:
dc_type = 0  # FLL
# DMFT loops:
n_loops = 1

# for the first iteration, add the output group:
if mpi.is_master_node():
    with HDFArchive(filename) as ar:
        if (not ar.is_group('dmft_output')):
            ar.create_group('dmft_output')

for iteration_number in range(1,n_loops+1):

    mpi.report("Iteration = %s"%iteration_number)
    
    SK.set_Sigma([ S.Sigma_iw ])                    # put Sigma into the SumK class
    chemical_potential = SK.calc_mu( precision = 0.01 )  # find the chemical potential for given density
    S.G_iw << SK.extract_G_loc()[0]

    if (iteration_number==1):
        # Put Hartree energy on Re Sigma
        dm = S.G_iw.density()
        SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)
        S.Sigma_iw << SK.block_structure.convert_matrix(SK.dc_imp[0],space_from='sumk',space_to='solver')['ud_0'][0,0]

    mpi.report("Orbital densities of local Green function :")
    for s,gf in S.G_iw:
        mpi.report("Orbital %s: %s"%(s,dm[s].real))
    mpi.report("Total charge of Gloc : %.6f"%S.G_iw.total_density().real)
Example #20
sum_k.corr_to_inequiv = corr_to_inequiv
sum_k.rot_mat = rot_mat_den

# set an empty sigma into sumk
broadening = 0.01
Sigma = sum_k.block_structure.create_gf(gf_function=GfReFreq,
                                        window=(-10, 10),
                                        n_points=5001)
sum_k.put_Sigma([Sigma])

# orb name def
n_orb = sum_k.corr_shells[0]['dim']
orb_names = list(range(n_orb))

###
mpi.report('######################\n')
mpi.report('Sum_k setup okay - move it')

# check for previous calculation
things_to_load = ['Gloc', 'Hloc_0', 'Hint', 'atom_diag', 'eigensys']
if os.path.exists(store_h5_file):
    with HDFArchive(store_h5_file, 'r') as ar:
        for elem in things_to_load:
            if elem in ar:
                mpi.report('loading ' + elem + ' from archive ' +
                           store_h5_file)
                vars()[elem] = ar[elem]

# extract
# if not 'Gloc' in locals():
# mpi.report('extracting Gloc')
Example #21
def dmft_cycle():
    filename = 'nio'
    
    Converter = VaspConverter(filename=filename)
    Converter.convert_dft_input()
    
    SK = SumkDFT(hdf_file = filename+'.h5', use_dft_blocks = False)
    
    beta = 5.0 
     
    Sigma = SK.block_structure.create_gf(beta=beta)
    SK.put_Sigma([Sigma])
    G = SK.extract_G_loc()
    SK.analyse_block_structure_from_gf(G, threshold = 1e-2)
    for i_sh in range(len(SK.deg_shells)):
        num_block_deg_orbs = len(SK.deg_shells[i_sh])
        mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(num_block_deg_orbs, i_sh))
        for iblock in range(num_block_deg_orbs):
            mpi.report('block {0:d} consists of orbitals:'.format(iblock))
            for keys in list(SK.deg_shells[i_sh][iblock].keys()):
                mpi.report('  '+keys)
    
    # Setup CTQMC Solver
    
    n_orb = SK.corr_shells[0]['dim']
    spin_names = ['up','down']
    orb_names = [i for i in range(0,n_orb)]
    
    #gf_struct = set_operator_structure(spin_names, orb_names, orb_hyb)
    gf_struct = SK.gf_struct_solver[0]
    mpi.report('Sumk to Solver: %s'%SK.sumk_to_solver)
    mpi.report('GF struct sumk: %s'%SK.gf_struct_sumk)
    mpi.report('GF struct solver: %s'%SK.gf_struct_solver)
    
    S = Solver(beta=beta, gf_struct=gf_struct)
    
    # Construct the Hamiltonian and save it in Hamiltonian_store.txt
    H = Operator() 
    U = 8.0
    J = 1.0
    
    
    U_sph = U_matrix(l=2, U_int=U, J_hund=J)
    U_cubic = transform_U_matrix(U_sph, spherical_to_cubic(l=2, convention=''))
    Umat, Upmat = reduce_4index_to_2index(U_cubic)
    
    H = h_int_density(spin_names, orb_names, map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)
    
    # Print some information on the master node
    mpi.report('Greens function structure is: %s '%gf_struct)
    mpi.report('U Matrix set to:\n%s'%Umat)
    mpi.report('Up Matrix set to:\n%s'%Upmat)
    
    # Parameters for the CTQMC Solver
    p = {}
    p["max_time"] = -1
    p["random_name"] = ""
    p["random_seed"] = 123 * mpi.rank + 567
    p["length_cycle"] = 100
    p["n_warmup_cycles"] = 2000
    p["n_cycles"] = 20000
    p["fit_max_moment"] = 4
    p["fit_min_n"] = 30
    p["fit_max_n"] = 50
    p["perform_tail_fit"] = True
    
    # Double Counting: 0 FLL, 1 Held, 2 AMF
    DC_type = 0
    DC_value = 59.0
    
    # Prepare hdf file and and check for previous iterations
    n_iterations = 1
    
    iteration_offset = 0
    if mpi.is_master_node():
        ar = HDFArchive(filename+'.h5','a')
        if not 'DMFT_results' in ar: ar.create_group('DMFT_results')
        if not 'Iterations' in ar['DMFT_results']: ar['DMFT_results'].create_group('Iterations')
        if not 'DMFT_input' in ar: ar.create_group('DMFT_input')
        if not 'Iterations' in ar['DMFT_input']: ar['DMFT_input'].create_group('Iterations')
        if not 'code_versions' in ar['DMFT_input']: ar['DMFT_input'].create_group('code_versions')
        ar['DMFT_input']['code_versions']["triqs_version"] = triqs_version.version
        ar['DMFT_input']['code_versions']["triqs_git"] = triqs_version.git_hash
        ar['DMFT_input']['code_versions']["cthyb_version"] = cthyb_version.version
        ar['DMFT_input']['code_versions']["cthyb_git"] = cthyb_version.triqs_cthyb_hash
        ar['DMFT_input']['code_versions']["dft_tools_version"] = dft_tools_version.version
        ar['DMFT_input']['code_versions']["dft_tools_git"] = dft_tools_version.triqs_dft_tools_hash
        if 'iteration_count' in ar['DMFT_results']:
            iteration_offset = ar['DMFT_results']['iteration_count']+1
            S.Sigma_iw = ar['DMFT_results']['Iterations']['Sigma_it'+str(iteration_offset-1)]
            SK.dc_imp = ar['DMFT_results']['Iterations']['dc_imp'+str(iteration_offset-1)]
            SK.dc_energ = ar['DMFT_results']['Iterations']['dc_energ'+str(iteration_offset-1)]
            SK.chemical_potential = ar['DMFT_results']['Iterations']['chemical_potential'+str(iteration_offset-1)].real
        ar['DMFT_input']["dmft_script_it"+str(iteration_offset)] = open(sys.argv[0]).read()
    iteration_offset = mpi.bcast(iteration_offset)
    S.Sigma_iw = mpi.bcast(S.Sigma_iw)
    SK.dc_imp = mpi.bcast(SK.dc_imp)
    SK.dc_energ = mpi.bcast(SK.dc_energ)
    SK.chemical_potential = mpi.bcast(SK.chemical_potential)
    
    # Calc the first G0
    SK.symm_deg_gf(S.Sigma_iw, ish=0)
    SK.put_Sigma(Sigma_imp = [S.Sigma_iw])
    SK.calc_mu(precision=0.01)
    S.G_iw << SK.extract_G_loc()[0]
    SK.symm_deg_gf(S.G_iw, ish=0)
    
    #Init the DC term and the self-energy if no previous iteration was found
    if iteration_offset == 0:
        dm = S.G_iw.density()
        SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=DC_type,use_dc_value=DC_value)
        S.Sigma_iw << SK.dc_imp[0]['up'][0,0]
    
    mpi.report('%s DMFT cycles requested. Starting with iteration %s.'%(n_iterations,iteration_offset))
    
    
    
    # The infamous DMFT self consistency cycle
    for it in range(iteration_offset, iteration_offset + n_iterations):
        mpi.report('Doing iteration: %s'%it)
        
        # Get G0
        S.G0_iw << inverse(S.Sigma_iw + inverse(S.G_iw))
        # Solve the impurity problem
        S.solve(h_int = H, **p)
        if mpi.is_master_node(): 
            ar['DMFT_input']['Iterations']['solver_dict_it'+str(it)] = p
            ar['DMFT_results']['Iterations']['Gimp_it'+str(it)] = S.G_iw
            ar['DMFT_results']['Iterations']['Gtau_it'+str(it)] = S.G_tau
            ar['DMFT_results']['Iterations']['Sigma_uns_it'+str(it)] = S.Sigma_iw
        # Calculate double counting
        dm = S.G_iw.density()
        SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=DC_type,use_dc_value=DC_value)
        # Get new G
        SK.symm_deg_gf(S.Sigma_iw, ish=0)
        SK.put_Sigma(Sigma_imp=[S.Sigma_iw])
        SK.calc_mu(precision=0.01)
        S.G_iw << SK.extract_G_loc()[0]
        
        # print densities
        for sig,gf in S.G_iw:
            mpi.report("Orbital %s density: %.6f"%(sig,dm[sig][0,0]))
        mpi.report('Total charge of Gloc : %.6f'%S.G_iw.total_density())
    
        if mpi.is_master_node(): 
            ar['DMFT_results']['iteration_count'] = it
            ar['DMFT_results']['Iterations']['Sigma_it'+str(it)] = S.Sigma_iw
            ar['DMFT_results']['Iterations']['Gloc_it'+str(it)] = S.G_iw
            ar['DMFT_results']['Iterations']['G0loc_it'+str(it)] = S.G0_iw
            ar['DMFT_results']['Iterations']['dc_imp'+str(it)] = SK.dc_imp
            ar['DMFT_results']['Iterations']['dc_energ'+str(it)] = SK.dc_energ
            ar['DMFT_results']['Iterations']['chemical_potential'+str(it)] = SK.chemical_potential
    
    
    
    
    if mpi.is_master_node():
        print('calculating mu...')
    SK.chemical_potential = SK.calc_mu( precision = 0.000001 )
    
    if mpi.is_master_node():
        print('calculating GAMMA')
    SK.calc_density_correction(dm_type='vasp')
    
    if mpi.is_master_node():
        print('calculating energy corrections')
    
    correnerg = 0.5 * (S.G_iw * S.Sigma_iw).total_density()
    
    dm = S.G_iw.density() # compute the density matrix of the impurity problem
    SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=DC_type,use_dc_value=DC_value)
    dc_energ = SK.dc_energ[0]
    
    if mpi.is_master_node(): 
        ar['DMFT_results']['Iterations']['corr_energy_it'+str(it)] = correnerg
        ar['DMFT_results']['Iterations']['dc_energy_it'+str(it)] = dc_energ
    
    if mpi.is_master_node(): del ar
        
    return correnerg, dc_energ