Example #1
File: solver.py  Project: tayral/triqs_0.x
    def update_params(self, d):

      allparams = [
        ('QMC_N_cycles_MAX', 'N_Cycles'),
        ('NCycles', 'N_Cycles'),
        ('Hloc', 'H_Local'),
        ('QuantumNumbers', 'Quantum_Numbers'),
        ('Length_One_QMC_Cycle', 'Length_Cycle'),
        ('Number_Warming_Iteration', 'N_Warmup_Cycles'),
        ('Number_Frequencies_Accumulated', 'N_Frequencies_Accumulated'),
        ('Global_Move', 'Global_Moves'),
        ('UseSegmentPicture', 'Use_Segment_Picture'),
        ('Proba_Move_Insert_Remove_Kink', 'Proba_Insert_Remove'),
        ('Proba_Move_Move_Kink', 'Proba_Move'),
        ('OperatorsToAverage', 'Measured_Operators'),
        ('OpCorrToAverage', 'Measured_Time_Correlators'),
        ('KeepGF_MC_series', 'Keep_Full_MC_Series'),
        ('DecorrelationAnalysisG_NFreq', 'Decorrelation_Analysis_G_NFreq'),
        ('RecordStatisticConfigurations', 'Record_Statistics_Configurations')
      ]

      issue_warning = False
      for (old, new) in allparams:
        if old in d:
          val = d.pop(old)
          d.update({new:val})
          issue_warning = True

      msg = """
**********************************************************************************
 Warning: some of the parameters you used have been renamed; the old names are
 deprecated and will be removed in a future version. Please check the documentation.
**********************************************************************************
"""
      if issue_warning: mpi.report(msg)
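
The pattern above pops each deprecated key and re-inserts its value under the new name. Below is a minimal standalone sketch of the same rename-in-place idea; the function name and the print call are illustrative and not part of the solver.

# Minimal standalone sketch of the rename-in-place pattern used above;
# rename_keys and the print call are illustrative, not part of the solver.
def rename_keys(d, renames):
    """Move values from deprecated keys to their new names, in place."""
    renamed_any = False
    for old, new in renames:
        if old in d:
            d[new] = d.pop(old)
            renamed_any = True
    return renamed_any

params = {'NCycles': 100000, 'Length_One_QMC_Cycle': 200}
if rename_keys(params, [('NCycles', 'N_Cycles'), ('Length_One_QMC_Cycle', 'Length_Cycle')]):
    print("Warning: deprecated parameter names were translated:", params)
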
Example #2
    def __repack(self):
        """Calls the h5repack routine, in order to reduce the file size of the hdf5 archive.
           Should only be used BEFORE the first invocation of HDFArchive in the program, otherwise
           the hdf5 linking is broken!!!"""

        import subprocess

        if not (mpi.is_master_node()): return

        mpi.report("Repacking the file %s"%self.hdf_file)

        retcode = subprocess.call(["h5repack","-i%s"%self.hdf_file, "-otemphgfrt.h5"])
        if (retcode!=0):
            mpi.report("h5repack failed!")
        else:
            subprocess.call(["mv","-f","temphgfrt.h5","%s"%self.hdf_file])
Example #3
   def Self_Consistency(self) :
      S.Transform_SymmetryBasis_toRealSpace (IN= S.Sigma, OUT = Sigma) # Embedding

      # Computes sum over BZ and returns density
      F = lambda mu : SK(mu = mu, Sigma = Sigma, field = None , result = G).total_density()/4

      if Density_Required :
         self.Chemical_potential = dichotomy.dichotomy(function = F,
                                                       x_init = self.Chemical_potential, y_value =Density_Required,
                                                       precision_on_y = 0.01, delta_x=0.5,  max_loops = 100,
                                                       x_name="Chemical_Potential", y_name= "Total Density",
                                                       verbosity = 3)[0]
      else:
         mpi.report("Total density  = %.3f"%F(self.Chemical_potential))

      S.Transform_RealSpace_to_SymmetryBasis (IN = G, OUT = S.G)       # Extraction
      S.G0 = inverse(S.Sigma + inverse(S.G))                           # Finally get S.G0
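
dichotomy.dichotomy adjusts the chemical potential until the computed density matches Density_Required. Below is a rough sketch of such a bracketing search, as an illustration only: it assumes the density grows monotonically with mu and omits the reporting and MPI handling that the real routine does.

# Rough sketch of the bracketing search performed by dichotomy.dichotomy
# (illustration only): assumes density_of_mu increases monotonically with mu.
def find_mu(density_of_mu, mu0, target, precision=0.01, delta=0.5, max_loops=100):
    a, b = mu0 - delta, mu0 + delta
    while density_of_mu(a) > target: a -= delta     # widen the bracket downwards
    while density_of_mu(b) < target: b += delta     # widen the bracket upwards
    mu = mu0
    for _ in range(max_loops):
        mu = 0.5 * (a + b)
        n = density_of_mu(mu)
        if abs(n - target) < precision:
            break
        if n < target: a = mu
        else:          b = mu
    return mu
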
Example #4
  def run(self, n_loops, mixing_coeff = 0.5, max_time = 0 ):
    r"""
      Run the DMFT Loop with the following algorithm :: 
       
        while STOP_CONDITION : 
            self.Self_Consistency()
            for solver in solver_list : solver.solve()
            self.post_solver() # defaults : does nothing

      where STOP_CONDITION is determined by the number of iterations.
        
      :param n_loops:    Maximum number of iterations of the loop
      :param mixing_coeff: Linear mixing coefficient applied to the self-energy after each solver run
      :param max_time: Maximum wall time of the loop in seconds (0 means no time limit).
    """

    # Set up the signal
    #   mpi.report("DMFTlab Job PID = %s"%os.getpid())
    # Set the signal handler and an alarm that fires after max_time seconds
    signal.signal(signal.SIGALRM, self.handler)
    signal.alarm(max_time)
 
    should_continue = True
    
    while (should_continue):
      mpi.report("------ Node : %d -------- Iteration Number = %d"%(mpi.rank,self.Iteration_Number))
      
      self.Self_Consistency()

      # call all solvers
      for n,sol in enumerate(self.SolverList) :
        if hasattr(self,"Chemical_potential") : sol.Chemical_potential=self.Chemical_potential
        sol.Iteration_Number=self.Iteration_Number
        sol.Solve()
        sol.Sigma  = sol.Sigma * mixing_coeff + sol.Sigma_Old * (1-mixing_coeff)
      
      # post-solver processing
      self.post_solver()
                         
      self.Iteration_Number +=1
      should_continue = self.__should_continue(n_loops)
 
    # end of the while loop
    mpi.report("----------- END of DMFT_Loop ----------------")
    mpi.barrier()
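
The loop relies on signal.alarm to bound the wall time: after max_time seconds SIGALRM fires and self.handler is expected to stop the iteration. A minimal standalone sketch of that pattern follows; the handler and flag names are illustrative.

# Minimal sketch of the alarm pattern: after the requested number of seconds
# SIGALRM fires and the loop exits at its next check. Handler and flag names
# are illustrative; signal.alarm(0) (the max_time=0 default) disables the alarm.
import signal
import time

stop_requested = False

def handler(signum, frame):
    global stop_requested
    stop_requested = True

signal.signal(signal.SIGALRM, handler)
signal.alarm(2)                       # ask for SIGALRM in 2 seconds

iteration = 0
while not stop_requested:
    time.sleep(0.1)                   # stand-in for one DMFT iteration
    iteration += 1
signal.alarm(0)                       # cancel any pending alarm
print("stopped after", iteration, "iterations")
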
Example #5
    def fit_tails(self):
        """Fits the tails using the constant value for the Re Sigma calculated from F=Sigma*G.
           Works only for blocks of size one."""

        #if (len(self.gf_struct)==2*self.n_orb):
        if (self.blocssizeone):
            spinblocs = [v for v in self.map]
            mpi.report("Fitting tails manually")

            known_coeff = numpy.zeros([1,1,2],numpy.float_)
            msh = [x.imag for x in self.G[self.map[spinblocs[0]][0]].mesh ]
            fit_start = msh[self.fitting_Frequency_Start]
            fit_stop = msh[self.N_Frequencies_Accumulated]	
            
            # Fit the tail of G just to get the density
            for n,g in self.G:
                g.fitTail([[[0,0,1]]],7,fit_start,2*fit_stop) 
            densmat = self.G.density()

            for sig1 in spinblocs:
                for i in range(self.n_orb):

                    coeff = 0.0

                    for sig2 in spinblocs:
                        for j in range(self.n_orb):
                            if (sig1==sig2):
                                coeff += self.U[self.offset+i,self.offset+j] * densmat[self.map[sig1][j]][0,0].real
                            else:
                                coeff += self.Up[self.offset+i,self.offset+j] * densmat[self.map[sig2][j]][0,0].real

                    known_coeff[0,0,1] = coeff
                    self.Sigma[self.map[sig1][i]].fitTail(fixed_coef = known_coeff, order_max = 3, fit_start = fit_start, fit_stop = fit_stop)

        else:

            for n,sig in self.Sigma:

                known_coeff = numpy.zeros([sig.N1,sig.N2,1],numpy.float_)
                msh = [x.imag for x in sig.mesh]
                fit_start = msh[self.fitting_Frequency_Start]
                fit_stop  = msh[self.N_Frequencies_Accumulated]
            
                sig.fitTail(fixed_coef = known_coeff, order_max = 3, fit_start = fit_start, fit_stop = fit_stop)
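
The constant coefficient fixed in the tail is a Hartree-like term: same-spin densities weighted by U and opposite-spin densities weighted by Up. A small standalone sketch of that sum is given below, assuming U and Up are numpy arrays like those returned by set_U_matrix and dens maps a spin label to the orbital occupations; the names are illustrative.

# Standalone sketch of the constant (Hartree-like) tail coefficient assembled
# above: same-spin densities weighted by U, opposite-spin densities by Up.
def hartree_coeff(i, spin, U, Up, dens):
    other = 'down' if spin == 'up' else 'up'
    coeff = 0.0
    for j in range(U.shape[0]):
        coeff += U[i, j] * dens[spin][j]       # same-spin contribution
        coeff += Up[i, j] * dens[other][j]     # opposite-spin contribution
    return coeff
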
Example #6
    def read_input_from_hdf(self, subgrp, things_to_read, optional_things=[]):
        """
        Reads data from the HDF5 archive and broadcasts it to all nodes
        """
        
        retval = True
        # init variables on all nodes:
        for it in things_to_read: exec "self.%s = 0"%it
        for it in optional_things: exec "self.%s = 0"%it
        
        if (mpi.is_master_node()):
            ar=HDFArchive(self.hdf_file,'a')
            if (subgrp in ar):
                # first read the necessary things:
                for it in things_to_read:
                    if (it in ar[subgrp]):
                        exec "self.%s = ar['%s']['%s']"%(it,subgrp,it)
                    else:
                        mpi.report("Loading %s failed!"%it)
                        retval = False
                   
                if ((retval) and (len(optional_things)>0)):
                    # if necessary things worked, now read optional things:
                    retval = {}
                    for it in optional_things:
                        if (it in ar[subgrp]):
                            exec "self.%s = ar['%s']['%s']"%(it,subgrp,it)
                            retval['%s'%it] = True
                        else:
                            retval['%s'%it] = False
            else:
                mpi.report("Loading failed: No %s subgroup in HDF5!"%subgrp)
                retval = False

            del ar

        # now do the broadcasting:
        for it in things_to_read: exec "self.%s = mpi.bcast(self.%s)"%(it,it)
        for it in optional_things: exec "self.%s = mpi.bcast(self.%s)"%(it,it)
        

        retval = mpi.bcast(retval)
               
        return retval
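
The exec calls above are Python 2 idiom for setting attributes by name. The same bookkeeping can be written with setattr, as in the sketch below for the required entries only; print stands in for mpi.report, the master-node check and broadcasting are omitted, and the function name is illustrative.

# Sketch of the same bookkeeping with setattr instead of exec (required
# entries only, illustration of the idea rather than the library's code).
def read_required(obj, ar, subgrp, things_to_read):
    ok = True
    for name in things_to_read:
        setattr(obj, name, 0)                   # initialise the attribute
        if name in ar[subgrp]:
            setattr(obj, name, ar[subgrp][name])
        else:
            print("Loading %s failed!" % name)
            ok = False
    return ok
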
Example #7
def set_U_matrix(U_interact,J_hund,n_orb,l,use_matrix=True,T=None,sl_int=None,use_spinflip=False,dim_reps=None,irep=None):
    """ Set up the interaction vertex""" 

    offset = 0
    U4ind = None
    U = None
    Up = None
    if (use_matrix):
        if not (sl_int is None):
            Umat = Umatrix(l=l)
            assert len(sl_int)==(l+1),"sl_int has the wrong length"
            if (type(sl_int)==ListType):
                Rcl = numpy.array(sl_int)
            else:
                Rcl = sl_int
            Umat(T=T,Rcl=Rcl)
        else:
            if ((U_interact==None)and(J_hund==None)):
                mpi.report("Give U,J or Slater integrals!!!")
                assert 0
            Umat = Umatrix(U_interact=U_interact, J_hund=J_hund, l=l)
            Umat(T=T)
            
        Umat.reduce_matrix()
        if (Umat.N==Umat.Nmat):
            # Transformation T is of size 2l+1
            U = Umat.U
            Up = Umat.Up
        else:
            # Transformation is of size 2(2l+1)
            U = Umat.U
        # now we have the reduced matrices U and Up; we need them for tail fitting anyway

        if (use_spinflip):
            #Take the 4index Umatrix
            # check for imaginary matrix elements:
            if (abs(Umat.Ufull.imag)>0.0001).any():
                mpi.report("WARNING: complex interaction matrix!! Ignoring imaginary part for the moment!")
                mpi.report("If you want to change this, look into Wien2k/solver_multiband.py")
            U4ind = Umat.Ufull.real
    
        # this will be changed for arbitrary irep:
        # use only one subgroup of orbitals?
        if not (irep is None):
            #print irep, dim_reps
            assert not (dim_reps is None), "Dimensions of the representatives are missing!"
            assert n_orb==dim_reps[irep-1],"Dimensions of dimrep and n_orb do not fit!"
            for ii in range(irep-1):
                offset += dim_reps[ii]
    else:
        if ((U_interact==None)and(J_hund==None)):
            mpi.report("For Kanamori representation, give U and J!!")
            assert 0
        U  = numpy.zeros([n_orb,n_orb],numpy.float_)
        Up = numpy.zeros([n_orb,n_orb],numpy.float_)
        for i in range(n_orb):
            for j in range(n_orb):
                if (i==j):
                    Up[i,i] = U_interact + 2.0*J_hund
                else:
                    Up[i,j] = U_interact
                    U[i,j]  = U_interact - J_hund

    return U, Up, U4ind, offset
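
For the Kanamori branch (use_matrix=False) the two matrices follow a simple pattern: Up carries U_interact + 2*J_hund on its diagonal and U_interact off it, while U carries U_interact - J_hund for the inter-orbital same-spin elements. A compact standalone sketch of that construction (function name is illustrative):

# Compact standalone sketch of the Kanamori branch above.
import numpy

def kanamori_matrices(n_orb, U_interact, J_hund):
    U  = numpy.zeros((n_orb, n_orb))    # same-spin, inter-orbital
    Up = numpy.zeros((n_orb, n_orb))    # opposite-spin
    for i in range(n_orb):
        for j in range(n_orb):
            if i == j:
                Up[i, i] = U_interact + 2.0 * J_hund
            else:
                Up[i, j] = U_interact
                U[i, j]  = U_interact - J_hund
    return U, Up
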
Example #8
    def convert_dmft_input(self):
        """
        Reads the input files, and stores the data in the HDFfile
        """
        
                   
        if not (mpi.is_master_node()): return # do it only on master:
        mpi.report("Reading input from %s..."%self.lda_file)

        # Read and write only on Master!!!
        # R is a generator : each R.next() will return the next number in the file
        R = read_fortran_file(self.lda_file)
        try:
            energy_unit = R.next()                         # read the energy conversion factor
            n_k = int(R.next())                            # read the number of k points
            k_dep_projection = 1                          
            SP = int(R.next())                            # flag for spin-polarised calculation
            SO = int(R.next())                            # flag for spin-orbit calculation
            charge_below = R.next()                       # total charge below energy window
            density_required = R.next()                   # total density required, for setting the chemical potential
            symm_op = 1                                   # Use symmetry groups for the k-sum

            # the information on the non-correlated shells is not important here, maybe skip:
            n_shells = int(R.next())                      # number of shells (e.g. Fe d, As p, O p) in the unit cell, 
                                                               # corresponds to index R in formulas
            # now read the information about the shells:
            shells = [ [ int(R.next()) for i in range(4) ] for icrsh in range(n_shells) ]    # reads iatom, sort, l, dim

            n_corr_shells = int(R.next())                 # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, 
                                                          # corresponds to index R in formulas
            # now read the information about the shells:
            corr_shells = [ [ int(R.next()) for i in range(6) ] for icrsh in range(n_corr_shells) ]    # reads iatom, sort, l, dim, SO flag, irep

            self.inequiv_shells(corr_shells)              # determine the number of inequivalent correlated shells, has to be known for further reading...


            use_rotations = 1
            rot_mat = [numpy.identity(corr_shells[icrsh][3],numpy.complex_) for icrsh in xrange(n_corr_shells)]
           
            # read the matrices
            rot_mat_time_inv = [0 for i in range(n_corr_shells)]

            for icrsh in xrange(n_corr_shells):
                for i in xrange(corr_shells[icrsh][3]):    # read real part:
                    for j in xrange(corr_shells[icrsh][3]):
                        rot_mat[icrsh][i,j] = R.next()
                for i in xrange(corr_shells[icrsh][3]):    # read imaginary part:
                    for j in xrange(corr_shells[icrsh][3]):
                        rot_mat[icrsh][i,j] += 1j * R.next()

                if (SP==1):             # read time inversion flag:
                    rot_mat_time_inv[icrsh] = int(R.next())
                    
                  
            
            # Read here the information for the transformation of the basis:
            n_reps = [1 for i in range(self.n_inequiv_corr_shells)]
            dim_reps = [0 for i in range(self.n_inequiv_corr_shells)]
            T = []
            for icrsh in range(self.n_inequiv_corr_shells):
                n_reps[icrsh] = int(R.next())   # number of representatives ("subsets"), e.g. t2g and eg
                dim_reps[icrsh] = [int(R.next()) for i in range(n_reps[icrsh])]   # dimensions of the subsets
            
            # The transformation matrix:
            # it is of dimension 2l+1, if no SO, and 2*(2l+1) with SO!!
            #T = []
            #for ish in xrange(self.n_inequiv_corr_shells):
                ll = 2*corr_shells[self.invshellmap[icrsh]][2]+1
                lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1)
                T.append(numpy.zeros([lmax,lmax],numpy.complex_))
                
                # now read it from file:
                for i in xrange(lmax):
                    for j in xrange(lmax):
                        T[icrsh][i,j] = R.next()
                for i in xrange(lmax):
                    for j in xrange(lmax):
                        T[icrsh][i,j] += 1j * R.next()

    
            # Spin blocks to be read:
            n_spin_blocs = SP + 1 - SO   # number of spins to read for Norbs and Ham, NOT Projectors
                 
        
            # read the list of n_orbitals for all k points
            n_orbitals = [ [0 for isp in range(n_spin_blocs)] for ik in xrange(n_k)]
            for isp in range(n_spin_blocs):
                for ik in xrange(n_k):
                    n_orbitals[ik][isp] = int(R.next())
            #print n_orbitals

            # Initialise the projectors:
            proj_mat = [ [ [numpy.zeros([corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_) 
                            for icrsh in range (n_corr_shells)] 
                           for isp in range(n_spin_blocs)] 
                         for ik in range(n_k) ]

            # Read the projectors from the file:
            for ik in xrange(n_k):
                for icrsh in range(n_corr_shells):
                    no = corr_shells[icrsh][3]
                    # first Real part for BOTH spins, due to conventions in dmftproj:
                    for isp in range(n_spin_blocs):
                        for i in xrange(no):
                            for j in xrange(n_orbitals[ik][isp]):
                                proj_mat[ik][isp][icrsh][i,j] = R.next()
                    # now Imag part:
                    for isp in range(n_spin_blocs):
                        for i in xrange(no):
                            for j in xrange(n_orbitals[ik][isp]):
                                proj_mat[ik][isp][icrsh][i,j] += 1j * R.next()
            
          
            # now define the arrays for weights and hopping ...
            bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k)  # w(k_index),  default normalisation 
            hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_) 
                         for isp in range(n_spin_blocs)] for ik in xrange(n_k) ]

                            
            # weights in the file
            for ik in xrange(n_k) : bz_weights[ik] = R.next()         
                
            # if the sum over spins is in the weights, take it out again!!
            sm = sum(bz_weights)
            bz_weights[:] /= sm 
	    
            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
            for isp in range(n_spin_blocs):
                for ik in xrange(n_k) :
                    no = n_orbitals[ik][isp]
                    for i in xrange(no):
                        hopping[ik][isp][i,i] = R.next() * energy_unit
            
            #keep some things that we need for reading parproj:
            self.n_shells = n_shells
            self.shells = shells
            self.n_corr_shells = n_corr_shells
            self.corr_shells = corr_shells
            self.n_spin_blocs = n_spin_blocs
            self.n_orbitals = n_orbitals
            self.n_k = n_k
            self.SO = SO
            self.SP = SP
            self.energy_unit = energy_unit
        except StopIteration : # a more explicit error if the file is corrupted.
            raise "SumkLDA : reading file HMLT_file failed!"

        R.close()
        
        #print proj_mat[0]

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDFArchive(self.hdf_file,'a')
        if not (self.lda_subgrp in ar): ar.create_group(self.lda_subgrp) 
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        
        ar[self.lda_subgrp]['energy_unit'] = energy_unit
        ar[self.lda_subgrp]['n_k'] = n_k
        ar[self.lda_subgrp]['k_dep_projection'] = k_dep_projection
        ar[self.lda_subgrp]['SP'] = SP
        ar[self.lda_subgrp]['SO'] = SO
        ar[self.lda_subgrp]['charge_below'] = charge_below
        ar[self.lda_subgrp]['density_required'] = density_required
        ar[self.lda_subgrp]['symm_op'] = symm_op
        ar[self.lda_subgrp]['n_shells'] = n_shells
        ar[self.lda_subgrp]['shells'] = shells
        ar[self.lda_subgrp]['n_corr_shells'] = n_corr_shells
        ar[self.lda_subgrp]['corr_shells'] = corr_shells
        ar[self.lda_subgrp]['use_rotations'] = use_rotations
        ar[self.lda_subgrp]['rot_mat'] = rot_mat
        ar[self.lda_subgrp]['rot_mat_time_inv'] = rot_mat_time_inv
        ar[self.lda_subgrp]['n_reps'] = n_reps
        ar[self.lda_subgrp]['dim_reps'] = dim_reps
        ar[self.lda_subgrp]['T'] = T
        ar[self.lda_subgrp]['n_orbitals'] = n_orbitals
        ar[self.lda_subgrp]['proj_mat'] = proj_mat
        ar[self.lda_subgrp]['bz_weights'] = bz_weights
        ar[self.lda_subgrp]['hopping'] = hopping
        
        del ar
              
        # Symmetries are used,
        # now read the symmetry input for the correlated orbitals:
        self.read_symmetry_input(orbits=corr_shells,symm_file=self.symm_file,symm_subgrp=self.symm_subgrp,SO=SO,SP=SP)
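
read_fortran_file is used here as a generator whose next() yields one number at a time. The sketch below shows the kind of reader that provides this interface; it is an illustration only, and the converter's actual implementation also exposes close() and its own error handling.

# Sketch of the kind of reader read_fortran_file provides: a generator yielding
# every whitespace-separated number, with Fortran 'D' exponents translated.
def read_numbers(filename):
    with open(filename) as f:
        for line in f:
            for token in line.split():
                yield float(token.replace('D', 'E').replace('d', 'e'))
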
Example #9
    def read_symmetry_input(self, orbits, symm_file, symm_subgrp, SO, SP):
        """
        Reads input for the symmetrisations from symm_file, which is case.sympar or case.symqmc.
        """

        if not (mpi.is_master_node()): return

        mpi.report("Reading symmetry input from %s..."%symm_file)

        n_orbits = len(orbits)
        R=read_fortran_file(symm_file)

        try:
            n_s = int(R.next())           # Number of symmetry operations
            n_atoms = int(R.next())       # number of atoms involved
            perm = [ [int(R.next()) for i in xrange(n_atoms)] for j in xrange(n_s) ]    # list of permutations of the atoms
            if SP: 
                time_inv = [ int(R.next()) for j in xrange(n_s) ]           # time inversion for SO coupling
            else:
                time_inv = [ 0 for j in xrange(n_s) ] 

            # Now read matrices:
            mat = []  
            for in_s in xrange(n_s):
                
                mat.append( [ numpy.zeros([orbits[orb][3], orbits[orb][3]],numpy.complex_) for orb in xrange(n_orbits) ] )
                for orb in range(n_orbits):
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat[in_s][orb][i,j] = R.next()            # real part
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat[in_s][orb][i,j] += 1j * R.next()      # imaginary part

            # determine the inequivalent shells:
            #SHOULD BE FINALLY REMOVED, PUT IT FOR ALL ORBITALS!!!!!
            #self.inequiv_shells(orbits)
            mat_tinv = [numpy.identity(orbits[orb][3],numpy.complex_)
                        for orb in range(n_orbits)]

            if ((SO==0) and (SP==0)):
                # here we need an additional time inversion operation, so read it:
                for orb in range(n_orbits):
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat_tinv[orb][i,j] = R.next()            # real part
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat_tinv[orb][i,j] += 1j * R.next()      # imaginary part
                


        except StopIteration : # a more explicit error if the file is corrupted.
	    raise "Symmetry : reading file failed!"
        
        R.close()

        # Save it to the HDF:
        ar=HDFArchive(self.hdf_file,'a')
        if not (symm_subgrp in ar): ar.create_group(symm_subgrp)
        thingstowrite = ['n_s','n_atoms','perm','orbits','SO','SP','time_inv','mat','mat_tinv']
        for it in thingstowrite: exec "ar['%s']['%s'] = %s"%(symm_subgrp,it,it)
        del ar
Example #10
    def convert_bands_input(self, bands_subgrp = 'SumK_LDA_Bands'):
        """
        Converts the input for momentum resolved spectral functions, and stores it in bands_subgrp in the
        HDF5.
        """

        if not (mpi.is_master_node()): return

        self.bands_subgrp = bands_subgrp
        mpi.report("Reading bands input from %s..."%self.band_file)

        R = read_fortran_file(self.band_file)
        try:
            n_k = int(R.next())

            # read the list of n_orbitals for all k points
            n_orbitals = [ [0 for isp in range(self.n_spin_blocs)] for ik in xrange(n_k)]
            for isp in range(self.n_spin_blocs):
                for ik in xrange(n_k):
                    n_orbitals[ik][isp] = int(R.next())

            # Initialise the projectors:
            proj_mat = [ [ [numpy.zeros([self.corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_) 
                            for icrsh in range (self.n_corr_shells)] 
                           for isp in range(self.n_spin_blocs)] 
                         for ik in range(n_k) ]

            # Read the projectors from the file:
            for ik in xrange(n_k):
                for icrsh in range(self.n_corr_shells):
                    no = self.corr_shells[icrsh][3]
                    # first Real part for BOTH spins, due to conventions in dmftproj:
                    for isp in range(self.n_spin_blocs):
                        for i in xrange(no):
                            for j in xrange(n_orbitals[ik][isp]):
                                proj_mat[ik][isp][icrsh][i,j] = R.next()
                    # now Imag part:
                    for isp in range(self.n_spin_blocs):
                        for i in xrange(no):
                            for j in xrange(n_orbitals[ik][isp]):
                                proj_mat[ik][isp][icrsh][i,j] += 1j * R.next()

            hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_) 
                         for isp in range(self.n_spin_blocs)] for ik in xrange(n_k) ]
         	    
            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
            for isp in range(self.n_spin_blocs):
                for ik in xrange(n_k) :
                    no = n_orbitals[ik][isp]
                    for i in xrange(no):
                        hopping[ik][isp][i,i] = R.next() * self.energy_unit

            # now read the partial projectors:
            n_parproj = [int(R.next()) for i in range(self.n_shells)]
            # Initialise P, here a double list of matrices:
            proj_mat_pc = [ [ [ [numpy.zeros([self.shells[ish][3], n_orbitals[ik][isp]], numpy.complex_) 
                                 for ir in range(n_parproj[ish])]
                                for ish in range (self.n_shells) ]
                              for isp in range(self.n_spin_blocs) ]
                            for ik in range(n_k) ]


            for ish in range(self.n_shells):
               
                for ik in xrange(n_k):
                    for ir in range(n_parproj[ish]):
                        for isp in range(self.n_spin_blocs):
                                    
                            for i in xrange(self.shells[ish][3]):    # read real part:
                                for j in xrange(n_orbitals[ik][isp]):
                                    proj_mat_pc[ik][isp][ish][ir][i,j] = R.next()
                            
                            for i in xrange(self.shells[ish][3]):    # read imaginary part:
                                for j in xrange(n_orbitals[ik][isp]):
                                    proj_mat_pc[ik][isp][ish][ir][i,j] += 1j * R.next()

        except StopIteration : # a more explicit error if the file is corrupted.
            raise "SumkLDA : reading file HMLT_file failed!"

        R.close()
        # reading done!

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDFArchive(self.hdf_file,'a')
        if not (self.bands_subgrp in ar): ar.create_group(self.bands_subgrp) 
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        thingstowrite = ['n_k','n_orbitals','proj_mat','hopping','n_parproj','proj_mat_pc']
        for it in thingstowrite: exec "ar['%s']['%s'] = %s"%(self.bands_subgrp,it,it)

        #ar[self.bands_subgrp]['n_k'] = n_k
        #ar[self.bands_subgrp]['n_orbitals'] = n_orbitals
        #ar[self.bands_subgrp]['proj_mat'] = proj_mat
        #self.proj_mat = proj_mat
        #self.n_orbitals = n_orbitals
        #self.n_k = n_k
        #self.hopping = hopping
        del ar
Example #11
    def convert_parproj_input(self, par_proj_subgrp='SumK_LDA_ParProj', symm_par_subgrp='SymmPar'):
        """
        Reads the input for the partial charges projectors from case.parproj, and stores it in the symm_par_subgrp
        group in the HDF5.
        """

        if not (mpi.is_master_node()): return

        self.par_proj_subgrp = par_proj_subgrp
        self.symm_par_subgrp = symm_par_subgrp

        mpi.report("Reading parproj input from %s..."%self.parproj_file)

        Dens_Mat_below = [ [numpy.zeros([self.shells[ish][3],self.shells[ish][3]],numpy.complex_) for ish in range(self.n_shells)] 
                           for isp in range(self.n_spin_blocs) ]

        R = read_fortran_file(self.parproj_file)
        #try:

        n_parproj = [int(R.next()) for i in range(self.n_shells)]
                
        # Initialise P, here a double list of matrices:
        proj_mat_pc = [ [ [ [numpy.zeros([self.shells[ish][3], self.n_orbitals[ik][isp]], numpy.complex_) 
                             for ir in range(n_parproj[ish])]
                            for ish in range (self.n_shells) ]
                          for isp in range(self.n_spin_blocs) ]
                        for ik in range(self.n_k) ]

        rot_mat_all = [numpy.identity(self.shells[ish][3],numpy.complex_) for ish in xrange(self.n_shells)]
        rot_mat_all_time_inv = [0 for i in range(self.n_shells)]

        for ish in range(self.n_shells):
            #print ish   
            # read first the projectors for this orbital:
            for ik in xrange(self.n_k):
                for ir in range(n_parproj[ish]):
                    for isp in range(self.n_spin_blocs):
                                    
                        for i in xrange(self.shells[ish][3]):    # read real part:
                            for j in xrange(self.n_orbitals[ik][isp]):
                                proj_mat_pc[ik][isp][ish][ir][i,j] = R.next()
                            
                    for isp in range(self.n_spin_blocs):
                        for i in xrange(self.shells[ish][3]):    # read imaginary part:
                            for j in xrange(self.n_orbitals[ik][isp]):
                                proj_mat_pc[ik][isp][ish][ir][i,j] += 1j * R.next()
                                        
                    
            # now read the Density Matrix for this orbital below the energy window:
            for isp in range(self.n_spin_blocs):
                for i in xrange(self.shells[ish][3]):    # read real part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i,j] = R.next()
            for isp in range(self.n_spin_blocs):
                for i in xrange(self.shells[ish][3]):    # read imaginary part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i,j] += 1j * R.next()
                if (self.SP==0): Dens_Mat_below[isp][ish] /= 2.0

            # Global -> local rotation matrix for this shell:
            for i in xrange(self.shells[ish][3]):    # read real part:
                for j in xrange(self.shells[ish][3]):
                    rot_mat_all[ish][i,j] = R.next()
            for i in xrange(self.shells[ish][3]):    # read imaginary part:
                for j in xrange(self.shells[ish][3]):
                    rot_mat_all[ish][i,j] += 1j * R.next()
                    
            #print Dens_Mat_below[0][ish],Dens_Mat_below[1][ish]
            
            if (self.SP):
                rot_mat_all_time_inv[ish] = int(R.next())

        #except StopIteration : # a more explicit error if the file is corrupted.
        #    raise "Wien2kConverter: reading file for Projectors failed!"
        R.close()

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDFArchive(self.hdf_file,'a')
        if not (self.par_proj_subgrp in ar): ar.create_group(self.par_proj_subgrp) 
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        thingstowrite = ['Dens_Mat_below','n_parproj','proj_mat_pc','rot_mat_all','rot_mat_all_time_inv']
        for it in thingstowrite: exec "ar['%s']['%s'] = %s"%(self.par_proj_subgrp,it,it)
        del ar

        # Symmetries are used,
        # now read the symmetry input for all orbitals:
        self.read_symmetry_input(orbits=self.shells,symm_file=self.symmpar_file,symm_subgrp=self.symm_par_subgrp,SO=self.SO,SP=self.SP)
Example #12
File: solver.py  Project: tayral/triqs_0.x
    def Solve(self):
        """ Solve the impurity problem """

        # Find whether an operator is already in OPdict
        def mysearch(op):
            l = [ k for (k,v) in OPdict.items() if (v-op).is_zero()]
            assert len(l) <=1
            return l[0] if l else None

        # Same, but raises an error if the operator is not found
        def myfind(op):
            r = mysearch(op)
            if r is None : raise KeyError("Operator %s cannot be found by myfind!"%op)
            return r

        # For backward compatibility
        self.update_params(self.__dict__)

        # Test all the parameters before solving
        mpi.report(parameters.check(self.__dict__,self.Required,self.Optional))

        # We have to add to the Hamiltonian the epsilon part of G0
        if type(self.H_Local) != type(Operator()) : raise TypeError("H_Local is not an operator")
        H = self.H_Local
        for a,alpha_list in  self.GFStruct :
            for mu in alpha_list : 
                for nu in alpha_list : 
                    H += real(self.G0[a]._tail[2][mu,nu]) * Cdag(a,mu)*C(a,nu)

        OPdict = {"Hamiltonian": H}
        mpi.report("Hamiltonian with Eps0 term  : ",H)
        
        # First separate the quantum Numbers that are operators and those which are symmetries.
        QuantumNumberOperators  = dict( (n,op) for (n,op) in self.Quantum_Numbers.items() if type(op) == type(Operator()))
        QuantumNumberSymmetries = dict( (n,op) for (n,op) in self.Quantum_Numbers.items() if type(op) != type(Operator()))

        # Check that the quantum numbers commute with the Hamiltonian
        for name,op in QuantumNumberOperators.items():
            assert commutator(self.H_Local ,op).is_zero(), "One quantum number does not commute with the Hamiltonian"
            OPdict[name]=op

        # Complete the OPdict with the fundamental operators
        OPdict, nf, nb, SymChar, NameOpFundamentalList = operators.complete_op_list_with_fundamentals(OPdict)

        # Add the operators to be averaged in OPdict and prepare the list for the C-code
        self.Measured_Operators_Results = {}
        self.twice_defined_Ops = {}
        self.Operators_To_Average_List = []
        for name, op in self.Measured_Operators.items():
          opn = mysearch(op)
          if opn == None : 
              OPdict[name] = op
              self.Measured_Operators_Results[name] = 0.0
              self.Operators_To_Average_List.append(name)
          else:
              mpi.report("Operator %s already defined as %s, using this instead for measuring"%(name,opn))
              self.twice_defined_Ops[name] = opn
              self.Measured_Operators_Results[opn] = 0.0
              if opn not in self.Operators_To_Average_List: self.Operators_To_Average_List.append(opn)

        # Time correlation functions are added
        self.OpCorr_To_Average_List = []
        for name, op in self.Measured_Time_Correlators.items():
          opn = mysearch(op[0])
          if opn == None : 
              OPdict[name] = op[0]
              self.OpCorr_To_Average_List.append(name)
          else:
              mpi.report("Operator %s already defined as %s, using this instead for measuring"%(name,opn))
              if opn not in self.OpCorr_To_Average_List: self.OpCorr_To_Average_List.append(opn)
        # Create storage for data:
        Nops = len(self.OpCorr_To_Average_List)
        f = lambda L : GfImTime(indices = [0], beta = self.beta, n_time_points =L )
        if (Nops>0):
            self.Measured_Time_Correlators_Results = BlockGf(name_block_generator = [ ( n,f(self.Measured_Time_Correlators[n][1]) ) for n in self.Measured_Time_Correlators], make_copies=False)
        else:
            self.Measured_Time_Correlators_Results = BlockGf(name_block_generator = [ ( 'OpCorr',f(2) ) ], make_copies=False)

        # Take care of the global moves

        # First, given a function (a,alpha,dagger) -> (a', alpha', dagger')
        # I construct a function on fundamental operators
        def Map_GM_to_Fund_Ops( GM ) :
            def f(fop) :
                a,alpha, dagger = fop.name + (fop.dag,)
                ap,alphap,daggerp = GM((a,alpha,dagger))
                return Cdag(ap,alphap) if daggerp else C(ap,alphap)
            return f

        # Complete the OpList so that it is closed under the global moves
        while 1:
            added_something = False
            for n,(proba,GM) in enumerate(self.Global_Moves):
                # F is a function that maps all operators according to the global move
                F = extend_function_on_fundamentals(Map_GM_to_Fund_Ops(GM))
                # Make sure that OPdict is complete, i.e. all images of OPdict operators are in OPdict
                for name,op in OPdict.items() :
                    op_im = F(op)
                    if mysearch(op_im)==None :
                        # find a free key and put it in the dictionary
                        i=0
                        while 1:
                            new_name = name + 'GM' +  i*'_' + "%s"%n
                            if new_name not in OPdict : break
                            i += 1   # lengthen the suffix until the name is unused
                        added_something = True
                        OPdict[new_name] = op_im
            # break the while loop
            if not added_something: break

        # Now I have all operators, I make the transcription of the global moves
        self.Global_Moves_Mapping_List = []
        for n,(proba,GM) in enumerate(self.Global_Moves):
            F = extend_function_on_fundamentals(Map_GM_to_Fund_Ops(GM))
            m = {}
            for name,op in OPdict.items() :
                op_im = F(op)
                n1,n2 = myfind(op),myfind(op_im)
                m[n1] = n2
            name = "%s"%n
            self.Global_Moves_Mapping_List.append((proba,m,name))
        #mpi.report ("Global_Moves_Mapping_List", self.Global_Moves_Mapping_List)

        # Now add the operator for F calculation if needed
        if self.Use_F :
            Hloc_WithoutQuadratic = self.H_Local.remove_quadratic()
            for n,op in OPdict.items() :
                if op.is_Fundamental():
                    op2 = commutator(Hloc_WithoutQuadratic,op)
                    if not mysearch(op2) : OPdict["%s_Comm_Hloc"%n] = op2

        # All operators have real coefficients. Check this and remove the 0j term
        # since the C++ expects operators with real numbers 
        for n,op in OPdict.items(): op.make_coef_real_and_check()

        # Transcription of operators for C++
        Oplist2 = operators.transcribe_op_list_for_C(OPdict)
        SymList = [sym for (n,sym) in SymChar.items() if n in QuantumNumberSymmetries]
        self.H_diag = C_Module.Hloc(nf,nb,Oplist2,QuantumNumberOperators,SymList,self.Quantum_Numbers_Selection,0) 

        # Create the C_Cdag_Ops array which describes the grouping of (C,Cdagger) operators
        # for the Monte Carlo moves : (a, alpha) block structure [ [ (C_name, Cdag_name)]]
        self.C_Cdag_Ops = [ [ (myfind(C(a,alpha)), myfind(Cdag(a,alpha))) for alpha in al ] for a,al in self.GFStruct]

        # Define G0_inv and correct it so that G0 has a perfect 1/omega behavior
        self.G0_inv = inverse(self.G0)
        Delta = self.G0_inv.delta()
        for n,g in self.G0_inv:
          assert(g.N1==g.N2)
          identity=numpy.identity(g.N1)
          self.G0[n] <<= gf_init.A_Omega_Plus_B(identity, g._tail[0])
          self.G0[n] -= Delta[n]
          #self.G0[n] <<= iOmega_n + g._tail[0] - Delta[n]
        self.G0_inv <<= self.G0
        self.G0.invert()

        # Construct the function in tau
        f = lambda g,L : GfImTime(indices = g.indices, beta = g.beta, n_time_points =L )
        self.Delta_tau = BlockGf(name_block_generator = [ (n,f(g,self.N_Time_Slices_Delta) )   for n,g in self.G], make_copies=False, name='D')
        self.G_tau = BlockGf(name_block_generator = [ (n,f(g,self.N_Time_Slices_Gtau) )    for n,g in self.G], make_copies=False, name='G')
        self.F_tau = BlockGf(name_block_generator = self.G_tau, make_copies=True, name='F')
        
        for (i,gt) in self.Delta_tau : gt.set_from_inverse_fourier(Delta[i])
        mpi.report("Inv Fourier done")
        if (self.Legendre_Accumulation):
            self.G_Legendre = BlockGf(name_block_generator = [ (n,GfLegendre(indices =g.indices, beta =g.beta, n_legendre_coeffs =self.N_Legendre_Coeffs) )   for n,g in self.G], make_copies=False, name='Gl')
        else:
            self.G_Legendre = BlockGf(name_block_generator = [ (n,GfLegendre(indices =[1], beta =g.beta, n_legendre_coeffs =1) ) for n,g in self.G], make_copies=False, name='Gl') # G_Legendre must not be empty but is not needed in this case. So I make it as small as possible.
        
        # Starting the C++ code
        self.Sigma_Old <<= self.Sigma
        C_Module.MC_solve(self.__dict__ ) # C++ solver
        
        # Compute G on Matsubara axis possibly fitting the tail
        if self.Legendre_Accumulation:
          for s,g in self.G:
            identity=numpy.zeros([g.N1,g.N2],numpy.float)
            for i,m in enumerate (g._IndicesL):
              for j,n in enumerate (g._IndicesR):
                if m==n: identity[i,j]=1
            self.G_Legendre[s].enforce_discontinuity(identity) # set the known tail
            g <<= LegendreToMatsubara(self.G_Legendre[s])
        else:
          if (self.Time_Accumulation):
            for name, g in self.G_tau:
              identity=numpy.zeros([g.N1,g.N2],numpy.float)
              for i,m in enumerate (g._IndicesL):
                for j,n in enumerate (g._IndicesR):
                  if m==n: identity[i,j]=1
              g._tail.zero()
              g._tail[1] = identity
              self.G[name].set_from_fourier(g)

          # This is very sick... but what can we do???
          self.Sigma <<= self.G0_inv - inverse(self.G)
          self.fitTails()
          self.G <<= inverse(self.G0_inv - self.Sigma)

        # Now find the self-energy
        self.Sigma <<= self.G0_inv - inverse(self.G)

        mpi.report("Solver %(name)s has ended."%self.__dict__)

        # for operator averages: if twice defined operator, rename output:
        for op1,op2 in self.twice_defined_Ops.items():
            self.Measured_Operators_Results[op1] = self.Measured_Operators_Results[op2]
        for op1,op2 in self.twice_defined_Ops.items():
            if op2 in self.Measured_Operators_Results.keys(): del self.Measured_Operators_Results[op2]

        if self.Use_F :
            for (n,f) in self.F: f.set_from_fourier(self.F_tau[n])
            self.G2 = self.G0 + self.G0 * self.F
            self.Sigma2 = self.F * inverse(self.G2)
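
The last steps apply the Dyson equation Sigma = G0^{-1} - G^{-1} block by block on BlockGf objects. Below is a plain-numpy illustration for a single 2x2 frequency point with made-up numbers, just to make the matrix relation explicit.

# Plain-numpy illustration of the Dyson step Sigma = G0^{-1} - G^{-1}
# performed above on BlockGf objects, for one 2x2 frequency point.
import numpy
from numpy.linalg import inv

G0 = numpy.array([[0.50 + 0.10j, 0.02], [0.02, 0.40 + 0.10j]])
G  = numpy.array([[0.45 + 0.12j, 0.03], [0.03, 0.35 + 0.09j]])
Sigma = inv(G0) - inv(G)
assert numpy.allclose(inv(inv(G0) - Sigma), G)   # G = (G0^{-1} - Sigma)^{-1}
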
Example #13
    def Solve(self,Iteration_Number=1,Test_Convergence=0.0001):
        """Calculation of the impurity Greens function using Hubbard-I"""

        # Test all the parameters before solving
        print parameters.check(self.__dict__,self.Required,self.Optional)
       	#SolverBase.Solve(self,is_last_iteration,Iteration_Number,Test_Convergence)
       
        if self.Converged :
            mpi.report("Solver %(name)s has already converted: SKIPPING"%self.__dict__)
            return

        self.__save_eal('eal.dat',Iteration_Number)

        mpi.report( "Starting Fortran solver %(name)s"%self.__dict__)

        self.Sigma_Old <<= self.Sigma
        self.G_Old <<= self.G

        # call the fortran solver:
        temp = 1.0/self.beta
        gf,tail,self.atocc,self.atmag = gf_hi_fullu(e0f=self.ealmat, ur=self.ur, umn=self.umn, ujmn=self.ujmn, 
                                                    zmsb=self.zmsb, nmom=self.Nmoments, ns=self.Nspin, temp=temp, verbosity = self.Verbosity)

        #self.sig = sigma_atomic_fullu(gf=self.gf,e0f=self.eal,zmsb=self.zmsb,ns=self.Nspin,nlm=self.Nlm)

        if (self.Verbosity==0):
            # No fortran output, so give basic results here
            mpi.report("Atomic occupancy in Hubbard I Solver  : %s"%self.atocc)
            mpi.report("Atomic magn. mom. in Hubbard I Solver : %s"%self.atmag)

        # transfer the data to the GF class:
        if (self.UseSpinOrbit): 
            nlmtot = self.Nlm*2         # only one block in this case!
        else:
            nlmtot = self.Nlm

        M={}
        isp=-1
        for a,al in self.GFStruct:
            isp+=1
            #M[a] = gf[isp*self.Nlm:(isp+1)*self.Nlm,isp*self.Nlm:(isp+1)*self.Nlm,:]
            M[a] = gf[isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot,:]
            for i in range(min(self.Nmoments,10)):
                self.tailtempl[a][i+1].array[:] = tail[i][isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot]
                 
        glist = lambda : [ GfImFreq(indices = al, beta = self.beta, n_matsubara = self.Nmsb, data =M[a], tail =self.tailtempl[a]) 
                           for a,al in self.GFStruct]
        self.G = BlockGf(name_list = self.a_list, block_list = glist(),make_copies=False)
            
        # Self energy:
        self.G0 <<= gf_init.A_Omega_Plus_B(A=1,B=0.0)
        
        M = [ self.ealmat[isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot] for isp in range((2*self.Nlm)/nlmtot) ] 
        self.G0 -= M
        self.Sigma <<= self.G0 - inverse(self.G)

        # invert G0
        self.G0.invert()
       
        def test_distance(G1,G2, dist) :
            def f(G1,G2) : 
                print abs(G1._data.array - G2._data.array)
                dS = max(abs(G1._data.array - G2._data.array).flatten())  
                aS = max(abs(G1._data.array).flatten())
                return dS <= aS*dist
            return reduce(lambda x,y : x and y, [f(g1,g2) for (i1,g1),(i2,g2) in izip(G1,G2)])

        mpi.report("\nChecking Sigma for convergence...\nUsing tolerance %s"%Test_Convergence)
        self.Converged = test_distance(self.Sigma,self.Sigma_Old,Test_Convergence)

        if self.Converged :
            mpi.report("Solver HAS CONVERGED")
        else :
            mpi.report("Solver has not yet converged")
Example #14
    def set_dc(self,dens_mat,U_interact,J_hund,orb=0,use_dc_formula=0,use_val=None):
        """Sets the double counting term for inequiv orbital orb
           use_dc_formula=0: LDA+U FLL double counting, use_dc_formula=1: Held's formula. 
           use_dc_formula=2: AMF
           Be sure that you use the correct interaction Hamiltonian!"""
        

        #if (not hasattr(self,"dc_imp")): self.__init_dc()
                    
                
        dm = [ {} for i in xrange(self.n_corr_shells)]
        for i in xrange(self.n_corr_shells):
            l = self.corr_shells[i][3] #*(1+self.corr_shells[i][4])
            for j in xrange(len(self.gf_struct_corr[i])):
                dm[i]['%s'%self.gf_struct_corr[i][j][0]] = numpy.zeros([l,l],numpy.float_)
        

        for icrsh in xrange(self.n_corr_shells):

            iorb = self.shellmap[icrsh]    # iorb is the index of the inequivalent shell corresponding to icrsh

            if (iorb==orb):
                # do this orbital

                l = self.corr_shells[icrsh][3] #*(1+self.corr_shells[icrsh][4])
                for j in xrange(len(self.gf_struct_corr[icrsh])):
                    self.dc_imp[icrsh]['%s'%self.gf_struct_corr[icrsh][j][0]] = numpy.identity(l,numpy.float_)


                # transform the CTQMC blocks to the full matrix:
                for ibl in range(len(self.gf_struct_solver[iorb])):
                    for i in range(len(self.gf_struct_solver[iorb][ibl][1])):
                        for j in range(len(self.gf_struct_solver[iorb][ibl][1])):
                            bl   = self.gf_struct_solver[iorb][ibl][0]
                            ind1 = self.gf_struct_solver[iorb][ibl][1][i]
                            ind2 = self.gf_struct_solver[iorb][ibl][1][j]
                            dm[icrsh][self.map_inv[iorb][bl]][ind1,ind2] = dens_mat[bl][i,j].real    # only real part relevant for trace

                M = self.corr_shells[icrsh][3]
                Ncr = {}
                Ncrtot = 0.0
                a_list = [a for a,al in self.gf_struct_corr[icrsh]]
                for bl in a_list:
                    Ncr[bl] = dm[icrsh][bl].trace()
                    Ncrtot += Ncr[bl]

                # average the densities if there is no SP:
                if (self.SP==0):
                    for bl in a_list:
                        Ncr[bl] = Ncrtot / len(a_list)
                # correction for SO: we have only one block in this case, but in DC we need N/2
                elif (self.SP==1 and self.SO==1):
                    for bl in a_list:
                        Ncr[bl] = Ncrtot / 2.0

                if (use_val is None):
                              
                    if (use_dc_formula==0):
                        self.dc_energ[icrsh] = U_interact / 2.0 * Ncrtot * (Ncrtot-1.0)
                        for bl in a_list:
                            Uav = U_interact*(Ncrtot-0.5) - J_hund*(Ncr[bl] - 0.5)
                            self.dc_imp[icrsh][bl] *= Uav                              
                            self.dc_energ[icrsh]  -= J_hund / 2.0 * (Ncr[bl]) * (Ncr[bl]-1.0)
                            mpi.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f"%locals())
                    elif (use_dc_formula==1):
                        self.dc_energ[icrsh] = (U_interact + J_hund * (2.0-(M-1)) / (2*M-1)  ) / 2.0 * Ncrtot * (Ncrtot-1.0)
                        for bl in a_list:
                            # Held's formula, with U_interact the interorbital onsite interaction
                            Uav = (U_interact + J_hund * (2.0-(M-1)) / (2*M-1)  ) * (Ncrtot-0.5)
                            self.dc_imp[icrsh][bl] *= Uav 
                            mpi.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f"%locals())
                    elif (use_dc_formula==2):
                        self.dc_energ[icrsh] = 0.5 * U_interact * Ncrtot * Ncrtot
                        for bl in a_list:
                            # AMF
                            Uav = U_interact*(Ncrtot - Ncr[bl]/M) - J_hund * (Ncr[bl] - Ncr[bl]/M)
                            self.dc_imp[icrsh][bl] *= Uav
                            self.dc_energ[icrsh] -= (U_interact + (M-1)*J_hund)/M * 0.5 * Ncr[bl] * Ncr[bl]
                            mpi.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f"%locals())
                    
                    # output:
                    mpi.report("DC energy for shell %s = %s"%(icrsh,self.dc_energ[icrsh]))

                else:    
            
                    a_list = [a for a,al in self.gf_struct_corr[icrsh]]
                    for bl in a_list:
                        self.dc_imp[icrsh][bl] *= use_val
                    
                    self.dc_energ[icrsh] = use_val * Ncrtot

                    # output:
                    mpi.report("DC for shell %(icrsh)i = %(use_val)f"%locals())
                    mpi.report("DC energy = %s"%self.dc_energ[icrsh])
Example #15
    def analyse_BS(self, threshold = 0.00001, include_shells = None, dm = None):
        """ Determines the Greens function block structure from the simple point integration"""

        if (dm==None): dm = self.simple_point_dens_mat()
        
        dens_mat = [dm[self.invshellmap[ish]] for ish in xrange(self.n_inequiv_corr_shells) ]

        if include_shells is None: include_shells=range(self.n_inequiv_corr_shells)
        for ish in include_shells:

            #self.gf_struct_solver.append([])
            self.gf_struct_solver[ish] = []

            a_list = [a for a,al in self.gf_struct_corr[self.invshellmap[ish]] ]
            for a in a_list:
                
                dm = dens_mat[ish][a]            
                dmbool = (abs(dm) > threshold)          # gives an index list of entries larger than the threshold

                offdiag = []
                for i in xrange(len(dmbool)):
                    for j in xrange(i,len(dmbool)):
                        if ((dmbool[i,j])&(i!=j)): offdiag.append([i,j])

                NBlocs = len(dmbool)
                blocs = [ [i] for i in range(NBlocs) ]

                for i in range(len(offdiag)):
                    if (offdiag[i][0]!=offdiag[i][1]):
                        for j in range(len(blocs[offdiag[i][1]])): blocs[offdiag[i][0]].append(blocs[offdiag[i][1]][j])
                        del blocs[offdiag[i][1]]
                        for j in range(i+1,len(offdiag)):
                            if (offdiag[j][0]==offdiag[i][1]): offdiag[j][0]=offdiag[i][0]
                            if (offdiag[j][1]==offdiag[i][1]): offdiag[j][1]=offdiag[i][0]
                            if (offdiag[j][0]>offdiag[i][1]): offdiag[j][0] -= 1
                            if (offdiag[j][1]>offdiag[i][1]): offdiag[j][1] -= 1
                            offdiag[j].sort()
                        NBlocs-=1

                for i in range(NBlocs):
                    blocs[i].sort()
                    self.gf_struct_solver[ish].append( ('%s%s'%(a,i),blocs[i]) )
                   
                               
                # map is the mapping of the blocs from the SK blocs to the CTQMC blocs:
                self.map[ish][a] = range(len(dmbool))
                for ibl in range(NBlocs):
                    for j in range(len(blocs[ibl])):
                        self.map[ish][a][blocs[ibl][j]] = '%s%s'%(a,ibl)
                        self.map_inv[ish]['%s%s'%(a,ibl)] = a


            # now calculate degeneracies of orbitals:
            dm = {}
            for bl in self.gf_struct_solver[ish]:
                bln = bl[0]
                ind = bl[1]
                # get dm for the blocks:
                dm[bln] = numpy.zeros([len(ind),len(ind)],numpy.complex_)
                for i in range(len(ind)):
                    for j in range(len(ind)):
                        dm[bln][i,j] = dens_mat[ish][self.map_inv[ish][bln]][ind[i],ind[j]]

            for bl in self.gf_struct_solver[ish]:
                for bl2 in self.gf_struct_solver[ish]:
                    if (dm[bl[0]].shape==dm[bl2[0]].shape) :
                        if ( ( (abs(dm[bl[0]]-dm[bl2[0]])<threshold).all() ) and (bl[0]!=bl2[0]) ):
                            # check if it was already there:
                            ind1=-1
                            ind2=-2
                            for n,ind in enumerate(self.deg_shells[ish]):
                                if (bl[0] in ind): ind1=n
                                if (bl2[0] in ind): ind2=n
                            if ((ind1<0)and(ind2>=0)):
                                self.deg_shells[ish][ind2].append(bl[0])
                            elif ((ind1>=0)and(ind2<0)):
                                self.deg_shells[ish][ind1].append(bl2[0])
                            elif ((ind1<0)and(ind2<0)):
                                self.deg_shells[ish].append([bl[0],bl2[0]])

        if (mpi.is_master_node()):
            ar=HDFArchive(self.hdf_file,'a')
            ar[self.lda_data]['gf_struct_solver'] = self.gf_struct_solver
            ar[self.lda_data]['map'] = self.map
            ar[self.lda_data]['map_inv'] = self.map_inv
            try:
                ar[self.lda_data]['deg_shells'] = self.deg_shells
            except:
                mpi.report("deg_shells not stored, degeneracies not found")
            del ar
            
        return dens_mat
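
The block detection above merges any two orbital indices coupled by an off-diagonal density-matrix element larger than the threshold. Below is a standalone sketch of that grouping on a plain numpy matrix (a small union-find, illustration only; the function name is hypothetical).

# Sketch of the block grouping performed above: indices coupled by a
# sizeable off-diagonal element end up in the same block.
import numpy

def find_blocks(dm, threshold=1e-5):
    n = dm.shape[0]
    parent = list(range(n))
    def root(i):
        while parent[i] != i: i = parent[i]
        return i
    for i in range(n):
        for j in range(i + 1, n):
            if abs(dm[i, j]) > threshold:
                parent[root(j)] = root(i)      # merge the two groups
    blocks = {}
    for i in range(n):
        blocks.setdefault(root(i), []).append(i)
    return sorted(blocks.values())

dm = numpy.diag([1.0, 0.8, 0.6]); dm[0, 2] = dm[2, 0] = 0.1
print(find_blocks(dm))     # [[0, 2], [1]]
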
Example #16
    def __init__(self, hdf_file, mu = 0.0, h_field = 0.0, use_lda_blocks = False, lda_data = 'SumK_LDA', symm_corr_data = 'SymmCorr',
                 par_proj_data = 'SumK_LDA_ParProj', symm_par_data = 'SymmPar', bands_data = 'SumK_LDA_Bands'):
        """
        Initialises the class from data previously stored in an HDF5 archive
        """

        if  not (type(hdf_file)==StringType):
            mpi.report("Give a string for the HDF5 filename to read the input!")
        else:
            self.hdf_file = hdf_file
            self.lda_data = lda_data
            self.par_proj_data = par_proj_data
            self.bands_data = bands_data
            self.symm_par_data = symm_par_data
            self.symm_corr_data = symm_corr_data
            self.block_names = [ ['up','down'], ['ud'] ]
            self.n_spin_blocks_gf = [2,1]
            self.Gupf = None
            self.h_field = h_field
            
            # read input from HDF:
            things_to_read = ['energy_unit','n_k','k_dep_projection','SP','SO','charge_below','density_required',
                              'symm_op','n_shells','shells','n_corr_shells','corr_shells','use_rotations','rot_mat',
                              'rot_mat_time_inv','n_reps','dim_reps','T','n_orbitals','proj_mat','bz_weights','hopping']
            optional_things = ['gf_struct_solver','map_inv','map','chemical_potential','dc_imp','dc_energ','deg_shells']

            #ar=HDFArchive(self.hdf_file,'a')
            #del ar

            self.retval = self.read_input_from_hdf(subgrp=self.lda_data,things_to_read=things_to_read,optional_things=optional_things)

            #ar=HDFArchive(self.hdf_file,'a')
            #del ar

            if (self.SO) and (abs(self.h_field)>0.000001):
                self.h_field=0.0
                mpi.report("For SO, the external magnetic field is not implemented, setting it to 0!!")

           
            self.inequiv_shells(self.corr_shells)     # determine the number of inequivalent correlated shells

            # field to convert block_names to indices
            self.names_to_ind = [{}, {}]
            for ibl in range(2):
                for inm in range(self.n_spin_blocks_gf[ibl]): 
                    self.names_to_ind[ibl][self.block_names[ibl][inm]] = inm * self.SP #(self.Nspinblocs-1)

            # GF structure used for the local things in the k sums
            self.gf_struct_corr = [ [ (al, range( self.corr_shells[i][3])) for al in self.block_names[self.corr_shells[i][4]] ]  
                                   for i in xrange(self.n_corr_shells) ]

            if not (self.retval['gf_struct_solver']):
                # No gf_struct was stored in HDF, so first set a standard one:
                self.gf_struct_solver = [ [ (al, range( self.corr_shells[self.invshellmap[i]][3]) )
                                           for al in self.block_names[self.corr_shells[self.invshellmap[i]][4]] ]
                                         for i in xrange(self.n_inequiv_corr_shells) ]
                self.map = [ {} for i in xrange(self.n_inequiv_corr_shells) ]
                self.map_inv = [ {} for i in xrange(self.n_inequiv_corr_shells) ]
                for i in xrange(self.n_inequiv_corr_shells):
                    for al in self.block_names[self.corr_shells[self.invshellmap[i]][4]]:
                        self.map[i][al] = [al for j in range( self.corr_shells[self.invshellmap[i]][3] ) ]
                        self.map_inv[i][al] = al

            if not (self.retval['dc_imp']):
                # init the double counting:
                self.__init_dc()

            if not (self.retval['chemical_potential']):
                self.chemical_potential = mu

            if not (self.retval['deg_shells']):
                self.deg_shells = [ [] for i in range(self.n_inequiv_corr_shells)]

            if self.symm_op:
                #mpi.report("Do the init for symm:")
                self.Symm_corr = Symmetry(hdf_file,subgroup=self.symm_corr_data)

            # determine the smallest blocs, if wanted:
            if (use_lda_blocks): dm=self.analyse_BS()

          
            # now save things again to HDF5:
            if (mpi.is_master_node()):
                ar=HDFArchive(self.hdf_file,'a')
                ar[self.lda_data]['h_field'] = self.h_field
                del ar
            self.save()