Example #1
 def __init__(self, S, BuildMasterOnly=True):
     """
     Construction : with named arguments only.
     Possible constructors from source S : 
        - Parameters(d) : d is a dict to be copied (no deepcopy, just updated).
        - Parameters(f) : f is a string containing the path of a file
          The type of the file is determined from the extension: 
             - .py, .txt : the file is executed in the Parameters
             - .xml : to be written
     MPI : if BuildMasterOnly is true, the construction is only done on the master,
           the result is then broadcast to all the nodes.
           Otherwise, it is done on all nodes (not recommended to read files).
     """
     if MPI.IS_MASTER_NODE() or not BuildMasterOnly : 
         if type(S) == type(''):
            # detect the type of the file
            try : 
               extension = S.split('.')[-1].lower()
            except IndexError: 
               raise ValueError, "I am lost : I can not determine the extension of the file !"
            if extension in ['py','txt'] : execfile(S,{},self)
             else : raise ValueError("Extension of the file not recognized")
         else : # S is therefore a dict 
             try : 
               self.update(S)
             except : 
               print "Error in Parameter constructor. Is the source an iterable ?"
               raise
     # end of master only
     if BuildMasterOnly : MPI.bcast(self) # bcast it on the nodes
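
The construction pattern above (copy a dict, or execute a .py/.txt file into the object itself) can be illustrated without TRIQS or MPI. A minimal sketch with a hypothetical SimpleParameters stand-in, not the library class:

# Illustrative stand-in for the Parameters construction pattern above (not the TRIQS class).
class SimpleParameters(dict):
    def __init__(self, source):
        dict.__init__(self)
        if isinstance(source, str):
            ext = source.rsplit('.', 1)[-1].lower()
            if ext in ('py', 'txt'):
                # execute the file; its top-level assignments become entries of self
                with open(source) as f:
                    exec(f.read(), {}, self)
            else:
                raise ValueError("Extension of the file not recognized: %s" % ext)
        else:
            self.update(source)   # assume a dict-like source

# Hypothetical usage:
#   p = SimpleParameters({'U': 4.0, 'Beta': 40})
#   p = SimpleParameters("my_params.py")     # a file containing e.g.  U = 4.0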
Example #2
    def update_params(self, d):

      allparams = [
        ('QMC_N_cycles_MAX', 'N_Cycles'),
        ('NCycles', 'N_Cycles'),
        ('Hloc', 'H_Local'),
        ('QuantumNumbers', 'Quantum_Numbers'),
        ('Length_One_QMC_Cycle', 'Length_Cycle'),
        ('Number_Warming_Iteration', 'N_Warmup_Cycles'),
        ('Number_Frequencies_Accumulated', 'N_Frequencies_Accumulated'),
        ('Global_Move', 'Global_Moves'),
        ('UseSegmentPicture', 'Use_Segment_Picture'),
        ('Proba_Move_Insert_Remove_Kink', 'Proba_Insert_Remove'),
        ('Proba_Move_Move_Kink', 'Proba_Move'),
        ('OperatorsToAverage', 'Measured_Operators'),
        ('OpCorrToAverage', 'Measured_Time_Correlators'),
        ('KeepGF_MC_series', 'Keep_Full_MC_Series'),
        ('DecorrelationAnalysisG_NFreq', 'Decorrelation_Analysis_G_NFreq'),
        ('RecordStatisticConfigurations', 'Record_Statistics_Configurations')
      ]

      issue_warning = False
      for (old, new) in allparams:
        if old in d:
          val = d.pop(old)
          d.update({new:val})
          issue_warning = True

      msg = """
**********************************************************************************
 Warning: some parameters you used have been renamed; the old names are deprecated
 and will be removed in future versions. Please check the documentation.
**********************************************************************************
"""
      if issue_warning: MPI.report(msg)
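
For illustration, the same renaming step applied to a plain dict with made-up values:

# Hypothetical usage of the renaming logic above on a plain dict:
d = {'NCycles': 100000, 'Hloc': 'H_operator', 'Beta': 40}
for old, new in [('NCycles', 'N_Cycles'), ('Hloc', 'H_Local')]:
    if old in d:
        d[new] = d.pop(old)
print(d)   # {'Beta': 40, 'N_Cycles': 100000, 'H_Local': 'H_operator'}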
Example #3
 def __init__(self, S, BuildMasterOnly=True):
     """
     Construction : with named arguments only.
     Possible constructors from source S : 
        - Parameters(d) : d is a dict to be copied (no deepcopy, just updated).
        - Parameters(f) : f is a string containing the path of a file
          The type of the file is determined from the extension: 
             - .py, .txt : the file is executed in the Parameters
             - .xml : to be written
     MPI : if BuildMasterOnly is true, the construction is only done on the master,
           the result is then broadcast to all the nodes.
           Otherwise, it is done on all nodes (not recommended to read files).
     """
     if MPI.IS_MASTER_NODE() or not BuildMasterOnly:
         if type(S) == type(''):
             # detect the type of the file
             try:
                 extension = S.split('.')[-1].lower()
             except IndexError:
                 raise ValueError, "I am lost : I can not determine the extension of the file !"
             if extension in ['py', 'txt']: execfile(S, {}, self)
             else: raise ValueError("Extension of the file not recognized")
         else:  # S is therefore a dict
             try:
                 self.update(S)
             except:
                 print "Error in Parameter constructor. Is the source an iterable ?"
                 raise
     # end of master only
     if BuildMasterOnly: MPI.bcast(self)  # bcast it on the nodes
Example #4
        def HT(Res) : 
            # First compute the eps_hat array
            eps_hat = Epsilon_Hat(self.dos.eps) if Epsilon_Hat else numpy.array( [ x* numpy.identity (Sigma.N1) for x in self.dos.eps] )
            assert eps_hat.shape[0] == self.dos.eps.shape[0],"Epsilon_Hat function behaves incorrectly"
            assert eps_hat.shape[1] == eps_hat.shape[2],"Epsilon_Hat function behaves incorrectly (result not a square matrix)"
            assert Sigma.N1 == eps_hat.shape[1], "Size of Sigma and of epsilon_hat mismatch"

            Res.zero()
            Sigma_fnt = callable(Sigma)
            if Sigma_fnt : assert len(inspect.getargspec(Sigma)[0]) ==1, "Sigma function is not of the correct type. See Documentation"

            # Perform the sum over eps[i]
            tmp,tmp2 = Res.copy(),Res.copy()
            tmp <<= GF_Initializers.A_Omega_Plus_B(1,mu + eta * 1j)
            if not(Sigma_fnt) :
                tmp -= Sigma
            if Field != None : tmp -= Field
            
            # I slice all the arrays on the node. Cf reduce operation below. 
            for d,e_h,e in  itertools.izip (*[MPI.slice_array(A) for A in [self.rho_for_sum,eps_hat,self.dos.eps]]):
                tmp2.copyFrom(tmp)
                tmp2 -= e_h
                if Sigma_fnt : tmp2 -= Sigma(e)
                tmp2.invert()
                tmp2 *= d
                Res += tmp2
            # sum the Res GF of all nodes and return the result on all nodes...
            # Cf Boost.mpi.python, collective communicator for documentation.
            # The point is that Res is picklable, hence can be transmitted between nodes without further code...
            Res <<= MPI.all_reduce(MPI.world,Res,lambda x,y : x+y)
            MPI.barrier()
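
A self-contained scalar analogue of the Hilbert-transform sum performed by HT (no GF objects, no MPI); the density of states and parameters below are made up:

import numpy

# Toy scalar analogue of HT: G(i w_n) = sum_i rho_i / (i w_n + mu - eps_i - Sigma)
eps = numpy.linspace(-1.0, 1.0, 101)               # sample energies
rho = numpy.sqrt(1.0 - eps**2)                     # semicircular-like weights
rho /= rho.sum()                                   # normalize to 1
mu, Sigma = 0.0, 0.1j                              # made-up chemical potential and self-energy
wn = 1j * numpy.pi * (2 * numpy.arange(64) + 1) / 40.0   # Matsubara frequencies for Beta = 40

G = numpy.zeros(len(wn), dtype=complex)
for e, d in zip(eps, rho):                         # the analogue of the eps-loop above
    G += d / (wn + mu - e - Sigma)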
Example #5
 def __should_continue(self,N_Iter_SelfCons_Max) :
   """ stop test"""
   should_continue = True
   if MPI.IS_MASTER_NODE():
     if (self.Iteration_Number > N_Iter_SelfCons_Max):
       should_continue = False
   should_continue = MPI.bcast(should_continue)
   return should_continue
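
The same decide-on-master-then-broadcast pattern, sketched with mpi4py rather than the TRIQS MPI wrapper (assumes mpi4py is installed and the script runs under mpirun):

# Sketch of the stop test with mpi4py (not the TRIQS MPI module).
from mpi4py import MPI

def should_continue(iteration_number, n_iter_max, comm=MPI.COMM_WORLD):
    flag = None
    if comm.Get_rank() == 0:                 # decide on the master only
        flag = iteration_number <= n_iter_max
    return comm.bcast(flag, root=0)          # every rank receives the master's decision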
Example #6
    def simplepointdensmat(self):
        """Calculates the density matrix by filling all band states below the Fermi level
           (simple-point k summation, no frequency sum) and projecting to the correlated shells."""

        ntoi = self.names_to_ind[self.SO]
        bln = self.blocnames[self.SO]

        MMat = [numpy.zeros( [self.N_Orbitals[0][ntoi[bl]],self.N_Orbitals[0][ntoi[bl]]], numpy.complex_) for bl in bln] 

        densmat = [ {} for icrsh in xrange(self.N_corr_shells)]
        for icrsh in xrange(self.N_corr_shells):
            for bl in self.blocnames[self.corr_shells[icrsh][4]]:
                densmat[icrsh][bl] = numpy.zeros([self.corr_shells[icrsh][3],self.corr_shells[icrsh][3]], numpy.complex_)

        ikarray=numpy.array(range(self.Nk))
          
        for ik in MPI.slice_array(ikarray):
            
            unchangedsize = all( [ self.N_Orbitals[ik][ntoi[bln[ib]]]==len(MMat[ib]) 
                                   for ib in range(self.NspinblocsGF[self.SO]) ] )
               
            if (not unchangedsize):
                MMat = [numpy.zeros( [self.N_Orbitals[ik][ntoi[bl]],self.N_Orbitals[ik][ntoi[bl]]], numpy.complex_) for bl in bln] 

            for ibl,bl in enumerate(bln):
                ind = ntoi[bl]
                for inu in range(self.N_Orbitals[ik][ind]):
                    if ( (self.Hopping[ik][ind][inu,inu]-self.hfield*(1-2*ibl)) < 0.0): 
                        MMat[ibl][inu,inu] = 1.0
                    else:
                        MMat[ibl][inu,inu] = 0.0 


            for icrsh in range(self.N_corr_shells):
                for ibn,bn in enumerate(self.blocnames[self.corr_shells[icrsh][4]]):
                    isp = self.names_to_ind[self.corr_shells[icrsh][4]][bn]
                    #print ik, bn, isp
                    densmat[icrsh][bn] += self.BZ_weights[ik] * numpy.dot( numpy.dot(self.Proj_Mat[ik][isp][icrsh],MMat[ibn]) , 
                                                                           self.Proj_Mat[ik][isp][icrsh].transpose().conjugate() )

        # get data from nodes:
        for icrsh in range(self.N_corr_shells):
            for sig in densmat[icrsh]:
                densmat[icrsh][sig] = MPI.all_reduce(MPI.world,densmat[icrsh][sig],lambda x,y : x+y)
        MPI.barrier()

                    
        if (self.symm_op!=0): densmat = self.Symm_corr.symmetrise(densmat)

        # Rotate to local coordinate system:
        if (self.use_rotations):
            for icrsh in xrange(self.N_corr_shells):
                for bn in densmat[icrsh]:
                    if (self.rotmat_timeinv[icrsh]==1): densmat[icrsh][bn] = densmat[icrsh][bn].conjugate()
                    densmat[icrsh][bn] = numpy.dot( numpy.dot(self.rotmat[icrsh].conjugate().transpose(),densmat[icrsh][bn]) , 
                                                    self.rotmat[icrsh])
                

        return densmat
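
The heart of the k-loop above is the downfolding P . M . P^dagger; a tiny numpy illustration with arbitrary sizes and random data:

import numpy

# Downfolding a band-space matrix M to a correlated subspace with a projector P,
# as done per k-point above (times the BZ weight).
n_corr, n_band = 3, 8                        # arbitrary sizes
P = numpy.random.rand(n_corr, n_band) + 1j * numpy.random.rand(n_corr, n_band)
M = numpy.diag(numpy.random.rand(n_band))    # e.g. diagonal occupation matrix
block = numpy.dot(numpy.dot(P, M), P.conjugate().transpose())
assert block.shape == (n_corr, n_corr)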
Example #7
    def fitTails(self):
        """Fits the tails using the constant value for the Re Sigma calculated from F=Sigma*G.
           Works only for blocks of size one."""

        #if (len(self.GFStruct)==2*self.Norb):
        if (self.blocssizeone):
            spinblocs = [v for v in self.map]
            MPI.report("Fitting tails manually")

            known_coeff = numpy.zeros([1, 1, 2], numpy.float_)
            msh = [x.imag for x in self.G[self.map[spinblocs[0]][0]].mesh]
            fit_start = msh[self.Fitting_Frequency_Start]
            fit_stop = msh[self.N_Frequencies_Accumulated]

            # Fit the tail of G just to get the density
            for n, g in self.G:
                g.fitTail([[[0, 0, 1]]], 7, fit_start, 2 * fit_stop)
            densmat = self.G.density()

            for sig1 in spinblocs:
                for i in range(self.Norb):

                    coeff = 0.0

                    for sig2 in spinblocs:
                        for j in range(self.Norb):
                            if (sig1 == sig2):
                                coeff += self.U[self.offset + i, self.offset +
                                                j] * densmat[self.map[sig1]
                                                             [j]][0, 0].real
                            else:
                                coeff += self.Up[self.offset + i, self.offset +
                                                 j] * densmat[self.map[sig2]
                                                              [j]][0, 0].real

                    known_coeff[0, 0, 1] = coeff
                    self.Sigma[self.map[sig1][i]].fitTail(
                        fixed_coef=known_coeff,
                        order_max=3,
                        fit_start=fit_start,
                        fit_stop=fit_stop)

        else:

            for n, sig in self.Sigma:

                known_coeff = numpy.zeros([sig.N1, sig.N2, 1], numpy.float_)
                msh = [x.imag for x in sig.mesh]
                fit_start = msh[self.Fitting_Frequency_Start]
                fit_stop = msh[self.N_Frequencies_Accumulated]

                sig.fitTail(fixed_coef=known_coeff,
                            order_max=3,
                            fit_start=fit_start,
                            fit_stop=fit_stop)
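
The coefficient accumulated in the double loop is a Hartree-like constant, roughly sum_j ( U_ij n_j(same spin) + Up_ij n_j(other spin) ); a compact numpy sketch with made-up U, Up and densities:

import numpy

# Toy illustration of the constant-tail coefficient built in the loops above.
U  = numpy.array([[0.0, 2.0], [2.0, 0.0]])    # same-spin interaction (made-up numbers)
Up = numpy.array([[4.0, 3.0], [3.0, 4.0]])    # opposite-spin interaction
n_up = numpy.array([0.5, 0.3])                # orbital densities, spin up
n_dn = numpy.array([0.4, 0.2])                # orbital densities, spin down

i = 0   # orbital index of the block being fitted, spin up
coeff = numpy.dot(U[i, :], n_up) + numpy.dot(Up[i, :], n_dn)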
Example #8
    def extract_Gloc(self, mu=None, withSigma = True):
        """ 
        extracts the local downfolded Green function at the chemical potential of the class.
        At the end, the local G is rotated from the global coordinate system to the local one.
        if withSigma = False: Sigma is not included => non-interacting local GF
        """

        if (mu is None): mu = self.Chemical_Potential
            
        Gloc = [ self.Sigmaimp[icrsh].copy() for icrsh in xrange(self.N_corr_shells) ]   # this list will be returned  
        for icrsh in xrange(self.N_corr_shells): Gloc[icrsh].zero()                # initialize to zero

        ikarray=numpy.array(range(self.Nk))
        
        for ik in MPI.slice_array(ikarray):
            
            S = self.latticeGF_Matsubara(ik=ik,mu=mu,withSigma = withSigma) 
            S *= self.BZ_weights[ik]

                
            for icrsh in xrange(self.N_corr_shells):
                tmp = Gloc[icrsh].copy()                  # init temporary storage
                for sig,gf in tmp: tmp[sig] <<= self.downfold(ik,icrsh,sig,S[sig],gf)
                Gloc[icrsh] += tmp

        #collect data from MPI:
        for icrsh in xrange(self.N_corr_shells):
            Gloc[icrsh] <<= MPI.all_reduce(MPI.world,Gloc[icrsh],lambda x,y : x+y)
        MPI.barrier()

  
        # Gloc[:] is now the sum over k projected to the local orbitals.
        # here comes the symmetrisation, if needed:   
        if (self.symm_op!=0): Gloc = self.Symm_corr.symmetrise(Gloc)
        
        # Gloc is rotated to the local coordinate system:
        if (self.use_rotations):
            for icrsh in xrange(self.N_corr_shells):
                for sig,gf in Gloc[icrsh]: Gloc[icrsh][sig] <<= self.rotloc(icrsh,gf,direction='toLocal')

        # transform to CTQMC blocks:
        Glocret = [ GF( Name_Block_Generator = [ (a,GFBloc_ImFreq(Indices = al, Mesh = Gloc[0].mesh)) for a,al in self.GFStruct_Solver[i] ],
                        Copy = False) for i in xrange(self.N_inequiv_corr_shells)  ]
        for ish in xrange(self.N_inequiv_corr_shells):
            for ibl in range(len(self.GFStruct_Solver[ish])):
                for i in range(len(self.GFStruct_Solver[ish][ibl][1])):
                    for j in range(len(self.GFStruct_Solver[ish][ibl][1])):
                        bl   = self.GFStruct_Solver[ish][ibl][0]
                        ind1 = self.GFStruct_Solver[ish][ibl][1][i]
                        ind2 = self.GFStruct_Solver[ish][ibl][1][j]
                        Glocret[ish][bl][ind1,ind2] <<= Gloc[self.invshellmap[ish]][self.mapinv[ish][bl]][ind1,ind2]


        # return only the inequivalent shells:
        return Glocret
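
MPI.slice_array hands each node its share of the k-point indices; a minimal stand-alone equivalent (no actual MPI) using numpy.array_split, with hypothetical rank/size values:

import numpy

def slice_array_like(arr, rank, size):
    """Return the chunk of arr that node `rank` of `size` would work on
    (same idea as MPI.slice_array above, without any MPI)."""
    return numpy.array_split(arr, size)[rank]

ikarray = numpy.arange(100)                          # e.g. 100 k-points
my_ik = slice_array_like(ikarray, rank=1, size=4)    # k-points handled by node 1 of 4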
Example #9
    def density_gf(self, Beta=40):
        """Calculates the density without setting up Gloc. It is useful for Hubbard I, and very fast."""

        densmat = [{} for icrsh in xrange(self.N_corr_shells)]
        for icrsh in xrange(self.N_corr_shells):
            for bl in self.blocnames[self.corr_shells[icrsh][4]]:
                densmat[icrsh][bl] = numpy.zeros(
                    [self.corr_shells[icrsh][3], self.corr_shells[icrsh][3]], numpy.complex_
                )

        ikarray = numpy.array(range(self.Nk))

        for ik in MPI.slice_array(ikarray):

            Gupf = self.latticeGF_Matsubara(ik=ik, mu=self.Chemical_Potential)
            Gupf *= self.BZ_weights[ik]
            dm = Gupf.density()
            MMat = [dm[bl] for bl in self.blocnames[self.SO]]

            for icrsh in range(self.N_corr_shells):
                for ibn, bn in enumerate(self.blocnames[self.corr_shells[icrsh][4]]):
                    isp = self.names_to_ind[self.corr_shells[icrsh][4]][bn]
                    # print ik, bn, isp
                    densmat[icrsh][bn] += numpy.dot(
                        numpy.dot(self.Proj_Mat[ik][isp][icrsh], MMat[ibn]),
                        self.Proj_Mat[ik][isp][icrsh].transpose().conjugate(),
                    )

        # get data from nodes:
        for icrsh in range(self.N_corr_shells):
            for sig in densmat[icrsh]:
                densmat[icrsh][sig] = MPI.all_reduce(MPI.world, densmat[icrsh][sig], lambda x, y: x + y)
        MPI.barrier()

        if self.symm_op != 0:
            densmat = self.Symm_corr.symmetrise(densmat)

        # Rotate to local coordinate system:
        if self.use_rotations:
            for icrsh in xrange(self.N_corr_shells):
                for bn in densmat[icrsh]:
                    if self.rotmat_timeinv[icrsh] == 1:
                        densmat[icrsh][bn] = densmat[icrsh][bn].conjugate()
                    densmat[icrsh][bn] = numpy.dot(
                        numpy.dot(self.rotmat[icrsh].conjugate().transpose(), densmat[icrsh][bn]), self.rotmat[icrsh]
                    )

        return densmat
Example #10
    def __repack(self):
        """Calls the h5repack routine, in order to reduce the file size of the hdf5 archive.
           Should only be used BEFORE the first invocation of HDF_Archive in the program, otherwise
           the hdf5 linking is broken!!!"""

        import subprocess

        if not (MPI.IS_MASTER_NODE()): return

        MPI.report("Repacking the file %s"%self.HDFfile)

        retcode = subprocess.call(["h5repack","-i%s"%self.HDFfile, "-otemphgfrt.h5"])
        if (retcode!=0):
            MPI.report("h5repack failed!")
        else:
            subprocess.call(["mv","-f","temphgfrt.h5","%s"%self.HDFfile])
Example #11
    def read_input_from_HDF(self, SubGrp, thingstoread, optionalthings=[]):
        """
        Reads data from the HDF file
        """

        retval = True
        # init variables on all nodes:
        for it in thingstoread:
            exec "self.%s = 0" % it
        for it in optionalthings:
            exec "self.%s = 0" % it

        if MPI.IS_MASTER_NODE():
            ar = HDF_Archive(self.HDFfile, "a")
            if SubGrp in ar:
                # first read the necessary things:
                for it in thingstoread:
                    if it in ar[SubGrp]:
                        exec "self.%s = ar['%s']['%s']" % (it, SubGrp, it)
                    else:
                        MPI.report("Loading %s failed!" % it)
                        retval = False

                if (retval) and (len(optionalthings) > 0):
                    # if necessary things worked, now read optional things:
                    retval = {}
                    for it in optionalthings:
                        if it in ar[SubGrp]:
                            exec "self.%s = ar['%s']['%s']" % (it, SubGrp, it)
                            retval["%s" % it] = True
                        else:
                            retval["%s" % it] = False
            else:
                MPI.report("Loading failed: No %s subgroup in HDF5!" % SubGrp)
                retval = False

            del ar

        # now do the broadcasting:
        for it in thingstoread:
            exec "self.%s = MPI.bcast(self.%s)" % (it, it)
        for it in optionalthings:
            exec "self.%s = MPI.bcast(self.%s)" % (it, it)

        retval = MPI.bcast(retval)

        return retval
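
The exec-based bookkeeping above can also be written with setattr; a sketch with a plain dict standing in for the HDF archive group and the broadcast left out:

# Sketch: read-or-default attribute loading with setattr instead of exec.
def read_things(obj, archive, thingstoread):
    ok = True
    for name in thingstoread:
        if name in archive:
            setattr(obj, name, archive[name])
        else:
            print("Loading %s failed!" % name)
            ok = False
    return ok

class Dummy(object): pass
target = Dummy()
read_things(target, {'Nk': 64, 'SP': 0}, ['Nk', 'SP'])   # sets target.Nk and target.SP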
Example #12
    def __repack(self):
        """Calls the h5repack routine, in order to reduce the file size of the hdf5 archive.
           Should only be used BEFORE the first invocation of HDF_Archive in the program, otherwise
           the hdf5 linking is broken!!!"""

        import subprocess

        if not (MPI.IS_MASTER_NODE()): return

        MPI.report("Repacking the file %s" % self.HDFfile)

        retcode = subprocess.call(
            ["h5repack", "-i%s" % self.HDFfile, "-otemphgfrt.h5"])
        if (retcode != 0):
            MPI.report("h5repack failed!")
        else:
            subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.HDFfile])
Example #13
   def Self_Consistency(self) :
      S.Transform_SymmetryBasis_toRealSpace (IN= S.Sigma, OUT = Sigma) # Embedding     
      
      # Computes sum over BZ and returns density
      F = lambda mu : SK(mu = mu,Sigma = Sigma, Field = None ,Res = G).total_density()/4 
      
      if Density_Required :
         self.Chemical_potential = Dichotomy.Dichotomy(Function = F,
                                                       xinit = self.Chemical_potential, yvalue =Density_Required,
                                                       Precision_on_y = 0.01, Delta_x=0.5,  MaxNbreLoop = 100, 
                                                       xname="Chemical_Potential", yname= "Total Density",
                                                       verbosity = 3)[0]
      else:
         MPI.report("Total density  = %.3f"%F(self.Chemical_potential))

      S.Transform_RealSpace_to_SymmetryBasis (IN = G, OUT = S.G)       # Extraction 
      S.G0 = inverse(S.Sigma + inverse(S.G))                           # Finally get S.G0 
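
Dichotomy.Dichotomy searches for the chemical potential at which F(mu) equals Density_Required; a self-contained bisection sketch of that idea with a toy monotonic density function (not the TRIQS routine, which also widens the bracket by Delta_x):

import math

# Minimal bisection sketch of the mu search performed by Dichotomy above.
def find_mu(F, target, x_lo, x_hi, tol=0.01, max_loops=100):
    x_mid = 0.5 * (x_lo + x_hi)
    for _ in range(max_loops):
        x_mid = 0.5 * (x_lo + x_hi)
        if abs(F(x_mid) - target) < tol:
            break
        if F(x_mid) < target:
            x_lo = x_mid          # density too low -> raise mu
        else:
            x_hi = x_mid          # density too high -> lower mu
    return x_mid

F = lambda mu: 2.0 / (1.0 + math.exp(-2.0 * mu))   # toy monotonic "total density(mu)"
mu = find_mu(F, target=1.0, x_lo=-5.0, x_hi=5.0)   # lands near mu = 0 for this toy F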
Example #14
    def fitTails(self): 
	"""Fits the tails using the constant value for the Re Sigma calculated from F=Sigma*G.
           Works only for blocks of size one."""
	
	#if (len(self.GFStruct)==2*self.Norb):
        if (self.blocssizeone):
            spinblocs = [v for v in self.map]
            MPI.report("Fitting tails manually")
	
            known_coeff = numpy.zeros([1,1,2],numpy.float_)
            msh = [x.imag for x in self.G[self.map[spinblocs[0]][0]].mesh ]
            fit_start = msh[self.Fitting_Frequency_Start]
            fit_stop = msh[self.N_Frequencies_Accumulated]	
            
            # Fit the tail of G just to get the density
            for n,g in self.G:
                g.fitTail([[[0,0,1]]],7,fit_start,2*fit_stop) 
            densmat = self.G.density()

            for sig1 in spinblocs:
                for i in range(self.Norb):

                    coeff = 0.0

                    for sig2 in spinblocs:
                        for j in range(self.Norb):
                            if (sig1==sig2):
                                coeff += self.U[self.offset+i,self.offset+j] * densmat[self.map[sig1][j]][0,0].real
                            else:
                                coeff += self.Up[self.offset+i,self.offset+j] * densmat[self.map[sig2][j]][0,0].real

                    known_coeff[0,0,1] = coeff
                    self.Sigma[self.map[sig1][i]].fitTail(fixed_coef = known_coeff, order_max = 3, fit_start = fit_start, fit_stop = fit_stop)

        else:

            for n,sig in self.Sigma:

                known_coeff = numpy.zeros([sig.N1,sig.N2,1],numpy.float_)
                msh = [x.imag for x in sig.mesh]
                fit_start = msh[self.Fitting_Frequency_Start]
                fit_stop  = msh[self.N_Frequencies_Accumulated]
            
                sig.fitTail(fixed_coef = known_coeff, order_max = 3, fit_start = fit_start, fit_stop = fit_stop)
Example #15
        def HT(Res):
            # First compute the eps_hat array
            eps_hat = Epsilon_Hat(
                self.dos.eps) if Epsilon_Hat else numpy.array(
                    [x * numpy.identity(Sigma.N1) for x in self.dos.eps])
            assert eps_hat.shape[0] == self.dos.eps.shape[
                0], "Epsilon_Hat function behaves incorrectly"
            assert eps_hat.shape[1] == eps_hat.shape[
                2], "Epsilon_Hat function behaves incorrectly (result not a square matrix)"
            assert Sigma.N1 == eps_hat.shape[
                1], "Size of Sigma and of epsilon_hat mismatch"

            Res.zero()
            Sigma_fnt = callable(Sigma)
            if Sigma_fnt:
                assert len(
                    inspect.getargspec(Sigma)[0]
                ) == 1, "Sigma function is not of the correct type. See Documentation"

            # Perform the sum over eps[i]
            tmp, tmp2 = Res.copy(), Res.copy()
            tmp <<= GF_Initializers.A_Omega_Plus_B(1, mu + eta * 1j)
            if not (Sigma_fnt):
                tmp -= Sigma
            if Field != None: tmp -= Field

            # I slice all the arrays on the node. Cf reduce operation below.
            for d, e_h, e in itertools.izip(*[
                    MPI.slice_array(A)
                    for A in [self.rho_for_sum, eps_hat, self.dos.eps]
            ]):
                tmp2.copyFrom(tmp)
                tmp2 -= e_h
                if Sigma_fnt: tmp2 -= Sigma(e)
                tmp2.invert()
                tmp2 *= d
                Res += tmp2
            # sum the Res GF of all nodes and return the result on all nodes...
            # Cf Boost.mpi.python, collective communicator for documentation.
            # The point is that Res is picklable, hence can be transmitted between nodes without further code...
            Res <<= MPI.all_reduce(MPI.world, Res, lambda x, y: x + y)
            MPI.barrier()
Example #16
    def total_density(self, mu):
        """
        Calculates the total charge for the energy window for a given mu. Since in general N_Orbitals depends on k, 
        the calculation is done in the following order:
        G_aa'(k,iw) -> n(k) = Tr G_aa'(k,iw) -> sum_k n_k 
        
        mu: chemical potential
        
        The calculation is done in the global coordinate system, if distinction is made between local/global!
        """

        dens = 0.0
        ikarray = numpy.array(range(self.Nk))

        for ik in MPI.slice_array(ikarray):

            S = self.latticeGF_Matsubara(ik=ik, mu=mu)
            dens += self.BZ_weights[ik] * S.total_density()

        # collect data from MPI:
        dens = MPI.all_reduce(MPI.world, dens, lambda x, y: x + y)
        MPI.barrier()

        return dens
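
The per-node partial densities are combined with all_reduce; the same pattern sketched with mpi4py and toy weights (run under mpirun):

# Sketch of the k-slice / all_reduce pattern with mpi4py (toy data, no Green functions).
from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
Nk = 100
BZ_weights = numpy.ones(Nk) / Nk
my_ik = numpy.array_split(numpy.arange(Nk), comm.Get_size())[comm.Get_rank()]

dens = 0.0
for ik in my_ik:
    dens += BZ_weights[ik] * 1.0             # stand-in for S.total_density() at this k
dens = comm.allreduce(dens, op=MPI.SUM)      # every rank now holds the full sum (1.0 here)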
Example #17
    def Self_Consistency(self):
        S.Transform_SymmetryBasis_toRealSpace(IN=S.Sigma,
                                              OUT=Sigma)  # Embedding

        # Computes sum over BZ and returns density
        F = lambda mu: SK(mu=mu, Sigma=Sigma, Field=None, Res=G).total_density(
        ) / 4

        if Density_Required:
            self.Chemical_potential = Dichotomy.Dichotomy(
                Function=F,
                xinit=self.Chemical_potential,
                yvalue=Density_Required,
                Precision_on_y=0.01,
                Delta_x=0.5,
                MaxNbreLoop=100,
                xname="Chemical_Potential",
                yname="Total Density",
                verbosity=3)[0]
        else:
            MPI.report("Total density  = %.3f" % F(self.Chemical_potential))

        S.Transform_RealSpace_to_SymmetryBasis(IN=G, OUT=S.G)  # Extraction
        S.G0 = inverse(S.Sigma + inverse(S.G))  # Finally get S.G0
Example #18
    def __init__(self, HDFfile, subgroup=None):
        """Initialises the class.
           Reads the permutations and rotation matrices from the file, and constructs the mapping for
           the given orbitals. For each orbit a matrix is read!!!
           SO: Flag for SO coupled calculations.
           SP: Spin polarisation yes/no
           """

        assert type(HDFfile) == StringType, "HDFfile must be a filename"
        self.HDFfile = HDFfile
        thingstoread = [
            'Ns', 'Natoms', 'perm', 'orbits', 'SO', 'SP', 'timeinv', 'mat',
            'mat_tinv'
        ]
        for it in thingstoread:
            exec "self.%s = 0" % it

        if (MPI.IS_MASTER_NODE()):
            #Read the stuff on master:
            ar = HDF_Archive(HDFfile, 'a')
            if (subgroup is None):
                ar2 = ar
            else:
                ar2 = ar[subgroup]

            for it in thingstoread:
                exec "self.%s = ar2['%s']" % (it, it)
            del ar2
            del ar

        #broadcasting
        for it in thingstoread:
            exec "self.%s = MPI.bcast(self.%s)" % (it, it)

        # now define the mapping of orbitals:
        # self.map[iorb]=jorb gives the permutation of the orbitals as given in the list, when the
        # permutation of the atoms is done:
        self.N_orbits = len(self.orbits)

        self.map = [[0 for iorb in range(self.N_orbits)]
                    for iNs in range(self.Ns)]
        for iNs in range(self.Ns):
            for iorb in range(self.N_orbits):

                srch = copy.deepcopy(self.orbits[iorb])
                srch[0] = self.perm[iNs][self.orbits[iorb][0] - 1]
                self.map[iNs][iorb] = self.orbits.index(srch)
Example #19
  def run(self,N_Loops, Mixing_Coefficient = 0.5, MaxTime = 0 ):
    r"""
      Run the DMFT Loop with the following algorithm :: 
       
        while STOP_CONDITION : 
            self.Self_Consistency()
            for solver in self.SolverList : solver.Solve()
            self.PostSolver() # defaults : does nothing

      where STOP_CONDITION is determined by the number of iterations.
        
      :param N_Loops:    Maximum number of iterations of the loop
      :param Mixing_Coefficient: Linear mixing coefficient applied to Sigma after each solver run
      :param MaxTime: Maximum wall time of the loop, in seconds (0 means no time limit).
    """

    # Set up the signal
    #   MPI.report("DMFTlab Job PID = %s"%os.getpid())
    # Set the signal handler and the MaxTime alarm
    signal.signal(signal.SIGALRM, self.handler)
    signal.alarm(MaxTime)
 
    should_continue = True
    
    while (should_continue):
      MPI.report("------ Node : %d -------- Iteration Number = %d"%(MPI.rank,self.Iteration_Number))
      
      self.Self_Consistency()

      # call all solvers
      for n,sol in enumerate(self.SolverList) :
        if hasattr(self,"Chemical_potential") : sol.Chemical_potential=self.Chemical_potential
        sol.Iteration_Number=self.Iteration_Number
        sol.Solve()
        sol.Sigma  = sol.Sigma * Mixing_Coefficient + sol.Sigma_Old * (1-Mixing_Coefficient)
      
      # post-solver processing
      self.PostSolver()
                         
      self.Iteration_Number +=1
      should_continue = self.__should_continue(N_Loops)
 
    # end of the while loop
    MPI.report("----------- END of DMFT_Loop ----------------")
    MPI.barrier()
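
A stripped-down, runnable skeleton of this loop (no MPI, no real solver, a scalar standing in for Sigma), showing the iteration counter and the linear mixing Sigma <- a*Sigma_new + (1-a)*Sigma_old; all names and numbers are illustrative:

# Illustrative skeleton of the DMFT loop above.
def dmft_loop(n_loops, mixing=0.5):
    sigma = 0.0
    for iteration in range(1, n_loops + 1):
        # self-consistency + "solver" step, here replaced by a toy update
        sigma_new = 0.5 * (1.0 + sigma)          # stand-in for sol.Solve()
        sigma_old = sigma
        sigma = mixing * sigma_new + (1.0 - mixing) * sigma_old
        print("Iteration Number = %d   Sigma = %.6f" % (iteration, sigma))
    return sigma

dmft_loop(n_loops=10)   # converges towards the toy fixed point Sigma = 1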
Example #20
    def DOSpartial(self, broadening=0.01):
        """calculates the orbitally-resolved DOS"""

        assert hasattr(self, "Sigmaimp"), "Set Sigma First!!"

        #thingstoread = ['Dens_Mat_below','N_parproj','Proj_Mat_pc','rotmat_all']
        #retval = self.read_input_from_HDF(SubGrp=self.ParProjdata, thingstoread=thingstoread)
        retval = self.read_ParProj_input_from_HDF()
        if not retval: return retval
        if self.symm_op:
            self.Symm_par = Symmetry(self.HDFfile, subgroup=self.Symmpardata)

        mu = self.Chemical_Potential

        GFStruct_proj = [[(al, range(self.shells[i][3]))
                          for al in self.blocnames[self.SO]]
                         for i in xrange(self.N_shells)]
        Gproj = [
            GF(Name_Block_Generator=[
                (a, GFBloc_ReFreq(Indices=al, Mesh=self.Sigmaimp[0].mesh))
                for a, al in GFStruct_proj[ish]
            ],
               Copy=False) for ish in xrange(self.N_shells)
        ]
        for ish in range(self.N_shells):
            Gproj[ish].zero()

        Msh = [x for x in self.Sigmaimp[0].mesh]
        N_om = len(Msh)

        DOS = {}
        for bn in self.blocnames[self.SO]:
            DOS[bn] = numpy.zeros([N_om], numpy.float_)

        DOSproj = [{} for ish in range(self.N_shells)]
        DOSproj_orb = [{} for ish in range(self.N_shells)]
        for ish in range(self.N_shells):
            for bn in self.blocnames[self.SO]:
                dl = self.shells[ish][3]
                DOSproj[ish][bn] = numpy.zeros([N_om], numpy.float_)
                DOSproj_orb[ish][bn] = numpy.zeros([dl, dl, N_om],
                                                   numpy.float_)

        ikarray = numpy.array(range(self.Nk))

        for ik in MPI.slice_array(ikarray):

            S = self.latticeGF_realfreq(ik=ik, mu=mu, broadening=broadening)
            S *= self.BZ_weights[ik]

            # non-projected DOS
            for iom in range(N_om):
                for sig, gf in S:
                    DOS[sig][iom] += gf._data.array[:, :, iom].imag.trace() / (
                        -3.1415926535)

            #projected DOS:
            for ish in xrange(self.N_shells):
                tmp = Gproj[ish].copy()
                for ir in xrange(self.N_parproj[ish]):
                    for sig, gf in tmp:
                        tmp[sig] <<= self.downfold_pc(ik, ir, ish, sig, S[sig],
                                                      gf)
                    Gproj[ish] += tmp

        # collect data from MPI:
        for sig in DOS:
            DOS[sig] = MPI.all_reduce(MPI.world, DOS[sig], lambda x, y: x + y)
        for ish in xrange(self.N_shells):
            Gproj[ish] <<= MPI.all_reduce(MPI.world, Gproj[ish],
                                          lambda x, y: x + y)
        MPI.barrier()

        if (self.symm_op != 0): Gproj = self.Symm_par.symmetrise(Gproj)

        # rotation to local coord. system:
        if (self.use_rotations):
            for ish in xrange(self.N_shells):
                for sig, gf in Gproj[ish]:
                    Gproj[ish][sig] <<= self.rotloc_all(ish,
                                                        gf,
                                                        direction='toLocal')

        for ish in range(self.N_shells):
            for sig, gf in Gproj[ish]:
                for iom in range(N_om):
                    DOSproj[ish][sig][
                        iom] += gf._data.array[:, :, iom].imag.trace() / (
                            -3.1415926535)
                DOSproj_orb[ish][
                    sig][:, :, :] += gf._data.array[:, :, :].imag / (
                        -3.1415926535)

        if (MPI.IS_MASTER_NODE()):
            # output to files
            for bn in self.blocnames[self.SO]:
                f = open('./DOScorr%s.dat' % bn, 'w')
                for i in range(N_om):
                    f.write("%s    %s\n" % (Msh[i], DOS[bn][i]))
                f.close()

                # partial
                for ish in range(self.N_shells):
                    f = open('DOScorr%s_proj%s.dat' % (bn, ish), 'w')
                    for i in range(N_om):
                        f.write("%s    %s\n" % (Msh[i], DOSproj[ish][bn][i]))
                    f.close()

                    for i in range(self.shells[ish][3]):
                        for j in range(i, self.shells[ish][3]):
                            Fname = './DOScorr' + bn + '_proj' + str(
                                ish) + '_' + str(i) + '_' + str(j) + '.dat'
                            f = open(Fname, 'w')
                            for iom in range(N_om):
                                f.write("%s    %s\n" %
                                        (Msh[iom], DOSproj_orb[ish][bn][i, j,
                                                                        iom]))
                            f.close()
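
The accumulation above uses DOS(omega) = -(1/pi) Tr Im G(omega) for each block; a small numpy illustration on a fake diagonal retarded Green function:

import numpy

# DOS(omega) = -(1/pi) Tr Im G(omega), accumulated block by block above.
omega = numpy.linspace(-3.0, 3.0, 601)
eta = 0.05                                        # broadening
eps = numpy.array([-0.5, 0.0, 0.7])               # toy levels
G = numpy.zeros((3, 3, len(omega)), dtype=complex)
for i, e in enumerate(eps):
    G[i, i, :] = 1.0 / (omega + 1j * eta - e)     # diagonal toy Green function
DOS = numpy.array([-G[:, :, iom].imag.trace() / numpy.pi for iom in range(len(omega))])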
Example #21
    def __call__(self,
                 Sigma,
                 mu=0,
                 eta=0,
                 Field=None,
                 Epsilon_Hat=None,
                 Res=None,
                 SelectedBlocks=()):
        """ 
	- Computes :
	   Res <- \sum_k w_k \[ \omega + \mu - Field - t(k) - Sigma(k,\omega) \]^{-1}
           if Res is None, it returns a new GF with the results.
           otherwise, Res must be a GF, in which the calculation is done, and which is then returned.
           (this allows chain calculation : SK(mu = mu,Sigma = Sigma, Res = G).total_density()
           which computes the sumK into G, and returns the density of G.)
  
        - Sigma can be a X, or a function k-> X or a function k,eps ->X where  : 
	    - k is expected to be a 1d-numpy array of size self.dim of float, 
	      containing the k vector in the basis of the RBZ  (i.e.  -0.5< k_i <0.5)
            - eps is t(k)
	    - X is anything such that X[BlockName] can be added/subtracted to a GFBloc for BlockName in SelectedBlocks.
	      e.g. X can be a GF (with at least the SelectedBlocks), or a dictionary BlockName -> array
	      if the array has the same dimension as the GF blocks (for example to add a static Sigma).

        - Field : Any k-independent object to be added to the GF

        - Epsilon_Hat : a function of eps_k returning a matrix, the dimensions of Sigma

        - SelectedBlocks : The calculation is done with the SAME t(k) for all blocks. If this list is not None
	  only the blocks in this list are calculated.
	  e.g. G and Sigma have block indices 'up' and 'down'. 
	       if SelectedBlocks ==None : 'up' and 'down' are calculated
	       if SelectedBlocks == ['up'] : only 'up' is calculated. 'down' is 0.


        """
        S = Sigma.View_SelectedBlocks(
            SelectedBlocks) if SelectedBlocks else Sigma
        Gres = Res if Res else Sigma.copy()
        G = Gres.View_SelectedBlocks(
            SelectedBlocks) if SelectedBlocks else Gres

        # check input
        assert self.Orthogonal_Basis, "Local_G : must be orthogonal. non ortho cases not checked."
        assert isinstance(G, GF), "G must be a GF"
        assert len(list(set([g.N1 for i, g in G]))) == 1
        assert self.BZ_weights.shape[0] == self.N_kpts(), "Internal Error"
        no = list(set([g.N1 for i, g in G]))[0]
        Sigma_Nargs = len(
            inspect.getargspec(Sigma)[0]) if callable(Sigma) else 0
        assert Sigma_Nargs <= 2, "Sigma function is not of the correct type. See Documentation"

        # Initialize
        G.zero()
        tmp, tmp2 = G.copy(), G.copy()
        mupat = mu * numpy.identity(no, numpy.complex_)
        tmp <<= iOmega_n
        if Field != None: tmp -= Field
        if Sigma_Nargs == 0: tmp -= Sigma  # subtract Sigma once and for all

        # Loop on k points...
        for w, k, eps_k in izip(*[
                MPI.slice_array(A)
                for A in [self.BZ_weights, self.BZ_Points, self.Hopping]
        ]):

            eps_hat = Epsilon_Hat(eps_k) if Epsilon_Hat else eps_k
            tmp2 <<= tmp
            tmp2 -= tmp2.NBlocks * [eps_hat - mupat]

            if Sigma_Nargs == 1: tmp2 -= Sigma(k)
            elif Sigma_Nargs == 2: tmp2 -= Sigma(k, eps_k)

            tmp2.invert()
            tmp2 *= w
            G += tmp2

        G <<= MPI.all_reduce(MPI.world, G, lambda x, y: x + y)
        MPI.barrier()

        return Gres
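
A single-band scalar analogue of the k-sum done by __call__ (no GF blocks, no MPI), making the formula G(i omega) = sum_k w_k [ i omega + mu - eps_k - Sigma ]^{-1} explicit with made-up parameters:

import numpy

# One-band toy version of the k-sum in __call__ above.
Nk, Beta, mu, Sigma0 = 64, 40.0, 0.2, 0.1 + 0.05j         # made-up parameters
kmesh = numpy.arange(Nk) * 2.0 * numpy.pi / Nk
eps_k = -2.0 * numpy.cos(kmesh)                            # 1d tight-binding dispersion
w_k = numpy.ones(Nk) / Nk                                  # BZ weights
iwn = 1j * numpy.pi * (2 * numpy.arange(128) + 1) / Beta   # Matsubara frequencies

G = numpy.zeros(len(iwn), dtype=complex)
for w, ek in zip(w_k, eps_k):
    G += w / (iwn + mu - ek - Sigma0)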
Example #22
    def __call__ (self, Sigma, mu=0, eta = 0, Field = None, Res = None, SelectedBlocks = () ):
	""" 
	- Computes :
	   Res <- \sum_k w_k \[ \omega + \mu - Field - t(k) - Sigma(k,\omega) \]^{-1}
           if Res is None, it returns a new GF with the results.
           otherwise, Res must be a GF, in which the calculation is done, and which is then returned.
           (this allows chain calculation : SK(mu = mu,Sigma = Sigma, Res = G).total_density()
           which computes the sumK into G, and returns the density of G.)
  
        - Sigma can be a X, or a function k-> X or a function k,eps ->X where  : 
	    - k is expected to be a 1d-numpy array of size self.dim of float, 
	      containing the k vector in the basis of the RBZ  (i.e.  -0.5< k_i <0.5)
            - eps is t(k)
	    - X is anything such that X[BlockName] can be added/subtracted to a GFBloc for BlockName in SelectedBlocks.
	      e.g. X can be a GF (with at least the SelectedBlocks), or a dictionary BlockName -> array
	      if the array has the same dimension as the GF blocks (for example to add a static Sigma).

        - Field : Any k-independent Array_with_GF_Indices to be added to the GF

        - SelectedBlocks : The calculation is done with the SAME t(k) for all blocks. If this list is not None
	  only the blocks in this list are calculated.
	  e.g. G and Sigma have block indices 'up' and 'down'. 
	       if SelectedBlocks ==None : 'up' and 'down' are calculated
	       if SelectedBlocks == ['up'] : only 'up' is calculated. 'down' is 0.

         """
        if Field : assert isinstance(Field,Array_with_GF_Indices) , " Field must be a  Array_with_GF_Indices object. Cf Example"
        S = Sigma.View_SelectedBlocks(SelectedBlocks) if SelectedBlocks else Sigma
        Gres = Res if Res else Sigma.copy() 
        G = Gres.View_SelectedBlocks(SelectedBlocks) if SelectedBlocks else Gres

        # check input
        assert self.Orthogonal_Basis, "Local_G : must be orthogonal. non ortho cases not checked."
        assert isinstance(G,GF), "G must be a GF"
        assert list(set([ g.N1 for i,g in G])) == [self.Hopping.shape[1]],"G size and hopping size mismatch"
        assert self.BZ_weights.shape[0] == self.N_kpts(), "Internal Error"
        Sigma_Nargs = len(inspect.getargspec(Sigma)[0]) if callable (Sigma) else 0
        assert Sigma_Nargs <=2 , "Sigma function is not of the correct type. See Documentation"

        #init
        G.zero()
        #tmp,tmp2 = GF(G),GF(G)
        tmp,tmp2 = G.copy(),G.copy()
        mupat = mu * self.Mu_Pattern 
        tmp <<= GF_Initializers.A_Omega_Plus_B(A=1,B=0)
        #tmp.Set_Omega()
        ##tmp += tmp.Nblocks() * [ mupat ]
        if Field : tmp -= Field 
        if Sigma_Nargs==0: tmp -= Sigma  # subtract Sigma once and for all

        # Loop on k points...
        for w, k, eps_k in izip(*[MPI.slice_array(A) for A in [self.BZ_weights, self.BZ_Points, self.Hopping]]):
            tmp2 <<= tmp
            #tmp2.copy_from(tmp)
            tmp2 -= tmp2.NBlocks * [eps_k -mupat ]
            #tmp2.save("tmp2_w")
            #Sigma.save("S_w")

            if Sigma_Nargs == 1: tmp2 -= Sigma (k)
            elif Sigma_Nargs ==2: tmp2 -= Sigma (k,eps_k)
            tmp2.invert()
            tmp2 *= w
            G += tmp2
            #G.save("GG1")
            #print mu,mupat,eps_k
            #assert 0
            #print G['up'][1,1]._data
        G <<= MPI.all_reduce(MPI.world,G,lambda x,y : x+y)
        MPI.barrier()

        return Gres   # return the GF that was actually computed (Res may be None)
Example #23
    def convert_DMFT_input(self):
        """
        Reads the input files, and stores the data in the HDFfile
        """
        
                   
        if not (MPI.IS_MASTER_NODE()): return # do it only on master:
        MPI.report("Reading input from %s..."%self.LDA_file)

        # Read and write only on Master!!!
        # R is a generator : each R.next() will return the next number in the file
        R = Read_Fortran_File(self.LDA_file)
        try:
            EnergyUnit = R.next()                         # read the energy conversion factor
            Nk = int(R.next())                            # read the number of k points
            k_dep_projection = 1                          
            SP = int(R.next())                            # flag for spin-polarised calculation
            SO = int(R.next())                            # flag for spin-orbit calculation
            charge_below = R.next()                       # total charge below energy window
            Density_Required = R.next()                   # total density required, for setting the chemical potential
            symm_op = 1                                   # Use symmetry groups for the k-sum

            # the information on the non-correlated shells is not important here, maybe skip:
            N_shells = int(R.next())                      # number of shells (e.g. Fe d, As p, O p) in the unit cell, 
                                                               # corresponds to index R in formulas
            # now read the information about the shells:
            shells = [ [ int(R.next()) for i in range(4) ] for icrsh in range(N_shells) ]    # reads iatom, sort, l, dim

            N_corr_shells = int(R.next())                 # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, 
                                                          # corresponds to index R in formulas
            # now read the information about the shells:
            corr_shells = [ [ int(R.next()) for i in range(6) ] for icrsh in range(N_corr_shells) ]    # reads iatom, sort, l, dim, SO flag, irep

            self.inequiv_shells(corr_shells)              # determine the number of inequivalent correlated shells, has to be known for further reading...


            use_rotations = 1
            rotmat = [numpy.identity(corr_shells[icrsh][3],numpy.complex_) for icrsh in xrange(N_corr_shells)]
           
            # read the matrices
            rotmat_timeinv = [0 for i in range(N_corr_shells)]

            for icrsh in xrange(N_corr_shells):
                for i in xrange(corr_shells[icrsh][3]):    # read real part:
                    for j in xrange(corr_shells[icrsh][3]):
                        rotmat[icrsh][i,j] = R.next()
                for i in xrange(corr_shells[icrsh][3]):    # read imaginary part:
                    for j in xrange(corr_shells[icrsh][3]):
                        rotmat[icrsh][i,j] += 1j * R.next()

                if (SP==1):             # read time inversion flag:
                    rotmat_timeinv[icrsh] = int(R.next())
                    
                  
            
            # Read here the infos for the transformation of the basis:
            Nreps = [1 for i in range(self.N_inequiv_corr_shells)]
            dim_reps = [0 for i in range(self.N_inequiv_corr_shells)]
            T = []
            for icrsh in range(self.N_inequiv_corr_shells):
                Nreps[icrsh] = int(R.next())   # number of representatives ("subsets"), e.g. t2g and eg
                dim_reps[icrsh] = [int(R.next()) for i in range(Nreps[icrsh])]   # dimensions of the subsets
            
            # The transformation matrix:
            # it is of dimension 2l+1, if no SO, and 2*(2l+1) with SO!!
            #T = []
            #for ish in xrange(self.N_inequiv_corr_shells):
                ll = 2*corr_shells[self.invshellmap[icrsh]][2]+1
                lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1)
                T.append(numpy.zeros([lmax,lmax],numpy.complex_))
                
                # now read it from file:
                for i in xrange(lmax):
                    for j in xrange(lmax):
                        T[icrsh][i,j] = R.next()
                for i in xrange(lmax):
                    for j in xrange(lmax):
                        T[icrsh][i,j] += 1j * R.next()

    
            # Spin blocks to be read:
            Nspinblocs = SP + 1 - SO   # number of spins to read for Norbs and Ham, NOT Projectors
                 
        
            # read the list of N_Orbitals for all k points
            N_Orbitals = [ [0 for isp in range(Nspinblocs)] for ik in xrange(Nk)]
            for isp in range(Nspinblocs):
                for ik in xrange(Nk):
                    N_Orbitals[ik][isp] = int(R.next())
            #print N_Orbitals

            # Initialise the projectors:
            Proj_Mat = [ [ [numpy.zeros([corr_shells[icrsh][3], N_Orbitals[ik][isp]], numpy.complex_) 
                            for icrsh in range (N_corr_shells)] 
                           for isp in range(Nspinblocs)] 
                         for ik in range(Nk) ]

            # Read the projectors from the file:
            for ik in xrange(Nk):
                for icrsh in range(N_corr_shells):
                    no = corr_shells[icrsh][3]
                    # first Real part for BOTH spins, due to conventions in dmftproj:
                    for isp in range(Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i,j] = R.next()
                    # now Imag part:
                    for isp in range(Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i,j] += 1j * R.next()
            
          
            # now define the arrays for weights and hopping ...
            BZ_weights = numpy.ones([Nk],numpy.float_)/ float(Nk)  # w(k_index),  default normalisation 
            Hopping = [ [numpy.zeros([N_Orbitals[ik][isp],N_Orbitals[ik][isp]],numpy.complex_) 
                         for isp in range(Nspinblocs)] for ik in xrange(Nk) ]

                            
            # weights in the file
            for ik in xrange(Nk) : BZ_weights[ik] = R.next()         
                
            # if the sum over spins is in the weights, take it out again!!
            sm = sum(BZ_weights)
            BZ_weights[:] /= sm 
	    
            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
            for isp in range(Nspinblocs):
                for ik in xrange(Nk) :
                    no = N_Orbitals[ik][isp]
                    for i in xrange(no):
                        Hopping[ik][isp][i,i] = R.next() * EnergyUnit
            
            #keep some things that we need for reading parproj:
            self.N_shells = N_shells
            self.shells = shells
            self.N_corr_shells = N_corr_shells
            self.corr_shells = corr_shells
            self.Nspinblocs = Nspinblocs
            self.N_Orbitals = N_Orbitals
            self.Nk = Nk
            self.SO = SO
            self.SP = SP
            self.EnergyUnit = EnergyUnit
        except StopIteration : # a more explicit error if the file is corrupted.
            raise "SumK_LDA : reading file HMLT_file failed!"

        R.close()
        
        #print Proj_Mat[0]

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDF_Archive(self.HDFfile,'a')
        if not (self.LDASubGrp in ar): ar.create_group(self.LDASubGrp) 
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        
        ar[self.LDASubGrp]['EnergyUnit'] = EnergyUnit
        ar[self.LDASubGrp]['Nk'] = Nk
        ar[self.LDASubGrp]['k_dep_projection'] = k_dep_projection
        ar[self.LDASubGrp]['SP'] = SP
        ar[self.LDASubGrp]['SO'] = SO
        ar[self.LDASubGrp]['charge_below'] = charge_below
        ar[self.LDASubGrp]['Density_Required'] = Density_Required
        ar[self.LDASubGrp]['symm_op'] = symm_op
        ar[self.LDASubGrp]['N_shells'] = N_shells
        ar[self.LDASubGrp]['shells'] = shells
        ar[self.LDASubGrp]['N_corr_shells'] = N_corr_shells
        ar[self.LDASubGrp]['corr_shells'] = corr_shells
        ar[self.LDASubGrp]['use_rotations'] = use_rotations
        ar[self.LDASubGrp]['rotmat'] = rotmat
        ar[self.LDASubGrp]['rotmat_timeinv'] = rotmat_timeinv
        ar[self.LDASubGrp]['Nreps'] = Nreps
        ar[self.LDASubGrp]['dim_reps'] = dim_reps
        ar[self.LDASubGrp]['T'] = T
        ar[self.LDASubGrp]['N_Orbitals'] = N_Orbitals
        ar[self.LDASubGrp]['Proj_Mat'] = Proj_Mat
        ar[self.LDASubGrp]['BZ_weights'] = BZ_weights
        ar[self.LDASubGrp]['Hopping'] = Hopping
        
        del ar
              
        # Symmetries are used, 
        # Now do the symmetries for correlated orbitals:
        self.read_Symmetry_input(orbits=corr_shells,symmfile=self.Symm_file,SymmSubGrp=self.SymmSubGrp,SO=SO,SP=SP)
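
Read_Fortran_File is consumed as a stream of numbers (each R.next() yields the next value in the file); a minimal generator of that kind, for illustration only (the real routine may handle more formats):

# Minimal sketch of a "next number in the file" generator in the spirit of Read_Fortran_File.
def number_stream(filename):
    with open(filename) as f:
        for line in f:
            for token in line.split():
                yield float(token)

# Hypothetical usage on a file of whitespace-separated numbers:
#   R = number_stream('LDA_input.dat')
#   EnergyUnit = next(R)
#   Nk = int(next(R))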
Example #24
    def Solve(self):
        """ Solve the impurity problem """

        # Find if an operator is in oplist
        def mysearch(op):
            l = [ k for (k,v) in OPdict.items() if (v-op).is_zero()]
            assert len(l) <=1
            return l[0] if l else None

        # Same but raises an error if the operator is not found
        def myfind(op):
            r = mysearch(op)
            if r is None : raise KeyError("Operator %s cannot be found by myfind !"%op)
            return r

        # For backward compatibility
        self.update_params(self.__dict__)

        # Test all the parameters before solving
        MPI.report(Parameters.check(self.__dict__,self.Required,self.Optional))

        # We have to add to the Hamiltonian the epsilon part of G0
        if type(self.H_Local) != type(Operator()) : raise TypeError("H_Local is not an operator")
        H = self.H_Local
        for a,alpha_list in  self.GFStruct :
            for mu in alpha_list : 
                for nu in alpha_list : 
                    H += real(self.G0[a]._tail[2][mu,nu]) * Cdag(a,mu)*C(a,nu)

        OPdict = {"Hamiltonian": H}
        MPI.report("Hamiltonian with Eps0 term  : ",H)
        
        # First separate the quantum Numbers that are operators and those which are symmetries.
        QuantumNumberOperators  = dict( (n,op) for (n,op) in self.Quantum_Numbers.items() if type(op) == type(Operator()))
        QuantumNumberSymmetries = dict( (n,op) for (n,op) in self.Quantum_Numbers.items() if type(op) != type(Operator()))

        # Check that the quantum numbers commutes with the Hamiltonian
        for name,op in QuantumNumberOperators.items():
            assert Commutator(self.H_Local ,op).is_zero(), "One quantum number is not commuting with Hamiltonian"
            OPdict[name]=op

        # Complete the OPdict with the fundamental operators
        OPdict, nf, nb, SymChar, NameOpFundamentalList = Operators.Complete_OperatorsList_with_Fundamentals(OPdict)

        # Add the operators to be averaged in OPdict and prepare the list for the C-code
        self.Measured_Operators_Results = {}
        self.twice_defined_Ops = {}
        self.Operators_To_Average_List = []
        for name, op in self.Measured_Operators.items():
          opn = mysearch(op)
          if opn == None : 
              OPdict[name] = op
              self.Measured_Operators_Results[name] = 0.0
              self.Operators_To_Average_List.append(name)
          else:
              MPI.report("Operator %s already defined as %s, using this instead for measuring"%(name,opn))
              self.twice_defined_Ops[name] = opn
              self.Measured_Operators_Results[opn] = 0.0
              if opn not in self.Operators_To_Average_List: self.Operators_To_Average_List.append(opn)

        # Time correlation functions are added
        self.OpCorr_To_Average_List = []
        for name, op in self.Measured_Time_Correlators.items():
          opn = mysearch(op[0])
          if opn == None : 
              OPdict[name] = op[0]
              self.OpCorr_To_Average_List.append(name)
          else:
              MPI.report("Operator %s already defined as %s, using this instead for measuring"%(name,opn))
              if opn not in self.OpCorr_To_Average_List: self.OpCorr_To_Average_List.append(opn)
        # Create storage for data:
        Nops = len(self.OpCorr_To_Average_List)
        f = lambda L : GFBloc_ImTime(Indices= [0], Beta = self.Beta, NTimeSlices=L )
        if (Nops>0):
            self.Measured_Time_Correlators_Results = GF(Name_Block_Generator = [ ( n,f(self.Measured_Time_Correlators[n][1]) ) for n in self.Measured_Time_Correlators], Copy=False)
        else:
            self.Measured_Time_Correlators_Results = GF(Name_Block_Generator = [ ( 'OpCorr',f(2) ) ], Copy=False)

        # Take care of the global moves

        # First, given a function (a,alpha,dagger) -> (a', alpha', dagger')
        # I construct a function on fundamental operators
        def Map_GM_to_Fund_Ops( GM ) :
            def f(fop) :
                a,alpha, dagger = fop.name + (fop.dag,)
                ap,alphap,daggerp = GM((a,alpha,dagger))
                return Cdag(ap,alphap) if daggerp else C(ap,alphap)
            return f

        # Complete the OpList so that it is closed under the global moves
        while 1:
            added_something = False
            for n,(proba,GM) in enumerate(self.Global_Moves):
                # F is a function that maps all operators according to the global move
                F = Extend_Function_on_Fundamentals(Map_GM_to_Fund_Ops(GM))
                # Make sure that OPdict is complete, i.e. all images of OPdict operators are in OPdict
                for name,op in OPdict.items() :
                    op_im = F(op)
                    if mysearch(op_im)==None :
                        # find an unused key and put the image in the dictionary
                        i=0
                        while 1:
                            new_name = name + 'GM' +  i*'_' + "%s"%n
                            if new_name not in OPdict : break
                            i+=1
                        added_something = True
                        OPdict[new_name] = op_im
            # break the while loop
            if not added_something: break

        # Now I have all operators, I make the transcription of the global moves
        self.Global_Moves_Mapping_List = []
        for n,(proba,GM) in enumerate(self.Global_Moves):
            F = Extend_Function_on_Fundamentals(Map_GM_to_Fund_Ops(GM))
            m = {}
            for name,op in OPdict.items() :
                op_im = F(op)
                n1,n2 = myfind(op),myfind(op_im)
                m[n1] = n2
            name = "%s"%n
            self.Global_Moves_Mapping_List.append((proba,m,name))
        #MPI.report ("Global_Moves_Mapping_List", self.Global_Moves_Mapping_List)

        # Now add the operator for F calculation if needed
        if self.Use_F :
            Hloc_WithoutQuadratic = self.H_Local.RemoveQuadraticTerms()
            for n,op in OPdict.items() :
                if op.is_Fundamental():
                    op2 = Commutator(Hloc_WithoutQuadratic,op)
                    if not mysearch(op2) : OPdict["%s_Comm_Hloc"%n] = op2

        # All operators must have real coefficients. Check this and remove the 0j term,
        # since the C++ code expects operators with real coefficients.
        for n,op in OPdict.items(): op.make_coef_real_and_check()

        # Transcription of operators for C++
        Oplist2 = Operators.Transcribe_OpList_for_C(OPdict)
        SymList = [sym for (n,sym) in SymChar.items() if n in QuantumNumberSymmetries]
        self.H_diag = C_Module.Hloc(nf,nb,Oplist2,QuantumNumberOperators,SymList,self.Quantum_Numbers_Selection,0) 

        # Create the C_Cdag_Ops array which describes the grouping of (C,Cdagger) operators
        # for the MonteCarlo moves : (a, alpha) block structure [ [ (C_name, Cdag_name)]]
        self.C_Cdag_Ops = [ [ (myfind(C(a,alpha)), myfind(Cdag(a,alpha))) for alpha in al ] for a,al in self.GFStruct]

        # Define G0_inv and correct it so that G0 has the exact 1/omega high-frequency behavior
        self.G0_inv = inverse(self.G0)
        Delta = self.G0_inv.Delta()
        for n,g in self.G0_inv:
          assert(g.N1==g.N2)
          identity=numpy.identity(g.N1)
          self.G0[n] <<= GF_Initializers.A_Omega_Plus_B(identity, g._tail[0])
          self.G0[n] -= Delta[n]
          #self.G0[n] <<= iOmega_n + g._tail[0] - Delta[n]
        self.G0_inv <<= self.G0
        self.G0.invert()

        # Construct the function in tau
        f = lambda g,L : GFBloc_ImTime(Indices= g.Indices, Beta = g.Beta, NTimeSlices=L )
        self.Delta_tau = GF(Name_Block_Generator = [ (n,f(g,self.N_Time_Slices_Delta) )   for n,g in self.G], Copy=False, Name='D')
        self.G_tau = GF(Name_Block_Generator = [ (n,f(g,self.N_Time_Slices_Gtau) )    for n,g in self.G], Copy=False, Name='G')
        self.F_tau = GF(Name_Block_Generator = self.G_tau, Copy=True, Name='F')
        
        for (i,gt) in self.Delta_tau : gt.setFromInverseFourierOf(Delta[i])
        MPI.report("Inv Fourier done")
        if (self.Legendre_Accumulation):
            self.G_Legendre = GF(Name_Block_Generator = [ (n,GFBloc_ImLegendre(Indices=g.Indices, Beta=g.Beta, NLegendreCoeffs=self.N_Legendre_Coeffs) )   for n,g in self.G], Copy=False, Name='Gl')
        else:
            self.G_Legendre = GF(Name_Block_Generator = [ (n,GFBloc_ImLegendre(Indices=[1], Beta=g.Beta, NLegendreCoeffs=1) ) for n,g in self.G], Copy=False, Name='Gl') # G_Legendre must not be empty but is not needed in this case. So I make it as small as possible.
        
        # Starting the C++ code
        self.Sigma_Old <<= self.Sigma
        C_Module.MC_solve(self.__dict__ ) # C++ solver
        
        # Compute G on Matsubara axis possibly fitting the tail
        if self.Legendre_Accumulation:
          for s,g in self.G:
            identity=numpy.zeros([g.N1,g.N2],numpy.float)
            for i,m in enumerate (g._IndicesL):
              for j,n in enumerate (g._IndicesR):
                if m==n: identity[i,j]=1
            self.G_Legendre[s].enforce_discontinuity(identity) # set the known tail
            g <<= LegendreToMatsubara(self.G_Legendre[s])
        else:
          if (self.Time_Accumulation):
            for name, g in self.G_tau:
              identity=numpy.zeros([g.N1,g.N2],numpy.float)
              for i,m in enumerate (g._IndicesL):
                for j,n in enumerate (g._IndicesR):
                  if m==n: identity[i,j]=1
              g._tail.zero()
              g._tail[1] = identity
              self.G[name].setFromFourierOf(g)

          # This is very sick... but what can we do???
          self.Sigma <<= self.G0_inv - inverse(self.G)
          self.fitTails()
          self.G <<= inverse(self.G0_inv - self.Sigma)

        # Now find the self-energy
        self.Sigma <<= self.G0_inv - inverse(self.G)

        MPI.report("Solver %(Name)s has ended."%self.__dict__)

        # for operator averages: if an operator was defined twice, rename the output:
        for op1,op2 in self.twice_defined_Ops.items():
            self.Measured_Operators_Results[op1] = self.Measured_Operators_Results[op2]
        for op1,op2 in self.twice_defined_Ops.items():
            if op2 in self.Measured_Operators_Results.keys(): del self.Measured_Operators_Results[op2]

        if self.Use_F :
            for (n,f) in self.F: f.setFromFourierOf(self.F_tau[n])
            self.G2 = self.G0 + self.G0 * self.F
            self.Sigma2 = self.F * inverse(self.G2)
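
A minimal sketch of the Dyson step that closes this Solve() method: the self-energy is obtained as Sigma = G0^{-1} - G^{-1} (and, when Use_F is set, as Sigma2 = F * G2^{-1}). The helper below is an illustrative stand-in acting on one frequency slice of a Green's function block, not part of the solver itself.

from numpy.linalg import inv

def dyson_sigma(G0_slice, G_slice):
    """Dyson's equation for a single frequency: Sigma = G0^{-1} - G^{-1}.
    G0_slice and G_slice are square complex matrices (illustrative names)."""
    return inv(G0_slice) - inv(G_slice)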
Пример #26
0
    def DOSpartial(self,broadening=0.01):
        """calculates the orbitally-resolved DOS"""

        assert hasattr(self,"Sigmaimp"), "Set Sigma First!!"

        #thingstoread = ['Dens_Mat_below','N_parproj','Proj_Mat_pc','rotmat_all']
        #retval = self.read_input_from_HDF(SubGrp=self.ParProjdata, thingstoread=thingstoread)
        retval = self.read_ParProj_input_from_HDF()
        if not retval: return retval
        if self.symm_op: self.Symm_par = Symmetry(self.HDFfile,subgroup=self.Symmpardata)

        mu = self.Chemical_Potential

        GFStruct_proj = [ [ (al, range(self.shells[i][3])) for al in self.blocnames[self.SO] ]  for i in xrange(self.N_shells) ]
        Gproj = [GF(Name_Block_Generator = [ (a,GFBloc_ReFreq(Indices = al, Mesh = self.Sigmaimp[0].mesh)) for a,al in GFStruct_proj[ish] ], Copy = False ) 
                 for ish in xrange(self.N_shells)]
        for ish in range(self.N_shells): Gproj[ish].zero()

        Msh = [x for x in self.Sigmaimp[0].mesh]
        N_om = len(Msh)

        DOS = {}
        for bn in self.blocnames[self.SO]:
            DOS[bn] = numpy.zeros([N_om],numpy.float_)

        DOSproj     = [ {} for ish in range(self.N_shells) ]
        DOSproj_orb = [ {} for ish in range(self.N_shells) ]
        for ish in range(self.N_shells):
            for bn in self.blocnames[self.SO]:
                dl = self.shells[ish][3]
                DOSproj[ish][bn] = numpy.zeros([N_om],numpy.float_)
                DOSproj_orb[ish][bn] = numpy.zeros([dl,dl,N_om],numpy.float_)

        ikarray=numpy.array(range(self.Nk))

        for ik in MPI.slice_array(ikarray):

            S = self.latticeGF_realfreq(ik=ik,mu=mu,broadening=broadening)
            S *= self.BZ_weights[ik]

            # non-projected DOS
            for iom in range(N_om): 
                for sig,gf in S: DOS[sig][iom] += gf._data.array[:,:,iom].imag.trace()/(-3.1415926535)
               
            #projected DOS:
            for ish in xrange(self.N_shells):
                tmp = Gproj[ish].copy()
                for ir in xrange(self.N_parproj[ish]):
                    for sig,gf in tmp: tmp[sig] <<= self.downfold_pc(ik,ir,ish,sig,S[sig],gf)
                    Gproj[ish] += tmp
                   
        # collect data from MPI:
        for sig in DOS:
            DOS[sig] = MPI.all_reduce(MPI.world,DOS[sig],lambda x,y : x+y)
        for ish in xrange(self.N_shells):
            Gproj[ish] <<= MPI.all_reduce(MPI.world,Gproj[ish],lambda x,y : x+y)
        MPI.barrier()        
                  
        if (self.symm_op!=0): Gproj = self.Symm_par.symmetrise(Gproj)

        # rotation to local coord. system:
        if (self.use_rotations):
            for ish in xrange(self.N_shells):
                for sig,gf in Gproj[ish]: Gproj[ish][sig] <<= self.rotloc_all(ish,gf,direction='toLocal')
                
        for ish in range(self.N_shells):
            for sig,gf in Gproj[ish]:  
                for iom in range(N_om): DOSproj[ish][sig][iom] += gf._data.array[:,:,iom].imag.trace()/(-3.1415926535)
                DOSproj_orb[ish][sig][:,:,:] += gf._data.array[:,:,:].imag / (-3.1415926535)
	    

        if (MPI.IS_MASTER_NODE()):
            # output to files
            for bn in self.blocnames[self.SO]:
                f=open('./DOScorr%s.dat'%bn, 'w')
                for i in range(N_om): f.write("%s    %s\n"%(Msh[i],DOS[bn][i]))
                f.close()    

                # partial
                for ish in range(self.N_shells):
                    f=open('DOScorr%s_proj%s.dat'%(bn,ish),'w')
                    for i in range(N_om): f.write("%s    %s\n"%(Msh[i],DOSproj[ish][bn][i]))
                    f.close()
 
                    for i in range(self.shells[ish][3]):
                        for j in range(i,self.shells[ish][3]):
                            Fname = './DOScorr'+bn+'_proj'+str(ish)+'_'+str(i)+'_'+str(j)+'.dat'
                            f=open(Fname,'w')
                            for iom in range(N_om): f.write("%s    %s\n"%(Msh[iom],DOSproj_orb[ish][bn][i,j,iom]))
                            f.close()
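
The accumulation in DOSpartial above is the spectral-function relation DOS(omega) = -(1/pi) Im Tr G(omega), applied block by block after the k-sum. A small numpy sketch of that single step; g_data is an illustrative stand-in for the complex (N1, N2, N_om) array behind gf._data.array.

import numpy

def dos_from_block(g_data):
    """Return -Im Tr g(omega) / pi for every frequency of one GF block (sketch).
    g_data: complex array of shape (N1, N2, N_om); the result has length N_om."""
    return -g_data.imag.trace(axis1=0, axis2=1) / numpy.pi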
Пример #27
0
    def read_Symmetry_input(self,orbits,symmfile,SymmSubGrp,SO,SP):
        """
        Reads input for the symmetrisations from symmfile, which is case.sympar or case.symqmc.
        """

        if not (MPI.IS_MASTER_NODE()): return

        MPI.report("Reading symmetry input from %s..."%symmfile)

        N_orbits = len(orbits)
        R=Read_Fortran_File(symmfile)

        try:
            Ns = int(R.next())           # Number of symmetry operations
            Natoms = int(R.next())       # number of atoms involved
            perm = [ [int(R.next()) for i in xrange(Natoms)] for j in xrange(Ns) ]    # list of permutations of the atoms
            if SP: 
                timeinv = [ int(R.next()) for j in xrange(Ns) ]           # time inversion for SO coupling
            else:
                timeinv = [ 0 for j in xrange(Ns) ] 

            # Now read matrices:
            mat = []  
            for iNs in xrange(Ns):
                
                mat.append( [ numpy.zeros([orbits[orb][3], orbits[orb][3]],numpy.complex_) for orb in xrange(N_orbits) ] )
                for orb in range(N_orbits):
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat[iNs][orb][i,j] = R.next()            # real part
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat[iNs][orb][i,j] += 1j * R.next()      # imaginary part

            # determine the inequivalent shells:
            #SHOULD BE FINALLY REMOVED, PUT IT FOR ALL ORBITALS!!!!!
            #self.inequiv_shells(orbits)
            mat_tinv = [numpy.identity(orbits[orb][3],numpy.complex_)
                        for orb in range(N_orbits)]

            if ((SO==0) and (SP==0)):
                # here we need an additional time inversion operation, so read it:
                for orb in range(N_orbits):
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat_tinv[orb][i,j] = R.next()            # real part
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat_tinv[orb][i,j] += 1j * R.next()      # imaginary part
                


        except StopIteration : # a more explicit error if the file is corrupted.
            raise IOError, "Symmetry : reading file failed!"
        
        R.close()

        # Save it to the HDF:
        ar=HDF_Archive(self.HDFfile,'a')
        if not (SymmSubGrp in ar): ar.create_group(SymmSubGrp)
        thingstowrite = ['Ns','Natoms','perm','orbits','SO','SP','timeinv','mat','mat_tinv']
        for it in thingstowrite: exec "ar['%s']['%s'] = %s"%(SymmSubGrp,it,it)
        del ar
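
The exec loop above (and the identical loops in the converter methods later in this listing) just stores each named local variable under its own name in the HDF5 subgroup. Below is a small exec-free sketch of the same idea; the helper name and keyword-argument style are illustrative and rely only on the dict-like indexing of HDF_Archive groups already used above.

def store_by_name(archive, subgrp, **kwargs):
    """Write each value under its own name into archive[subgrp] (sketch)."""
    for name, value in kwargs.items():
        archive[subgrp][name] = value

# e.g. store_by_name(ar, SymmSubGrp, Ns=Ns, Natoms=Natoms, perm=perm, mat=mat, ...)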
Пример #28
0
    N_Legendre_Coeffs=50,  # Number of Legendre coefficients
    Random_Generator_Name='mt19937',  # Name of the random number generator
    Use_Segment_Picture=True,  # Use the segment picture
    Measured_Operators={  # Operators to be averaged
        'Nimp': N('up', 1) + N('down', 1)
    },
    Global_Moves=[  # Global move in the QMC
        (0.05, lambda (a, alpha, dag): ({
            'up': 'down',
            'down': 'up'
        }[a], alpha, dag))
    ],
)

# Initialize the non-interacting Green's function S.G0
for spin, g0 in S.G0:
    g0 <<= inverse(iOmega_n - e_f - V**2 * Wilson(D))

# Run the solver. The result will be in S.G
S.Solve()

# Save the results in an hdf5 file (only on the master node)
from pytriqs.Base.Archive import HDF_Archive
import pytriqs.Base.Utility.MPI as MPI

if MPI.IS_MASTER_NODE():
    Results = HDF_Archive("solution.h5", 'w')
    Results["G"] = S.G
    Results["Gl"] = S.G_Legendre
    Results["Nimp"] = S.Measured_Operators_Results['Nimp']
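
The archive written above can be read back later in the same dict-like way, e.g. for plotting; a short usage sketch, assuming the standard read flag 'r' of HDF_Archive and the group names written above.

from pytriqs.Base.Archive import HDF_Archive

R = HDF_Archive("solution.h5", 'r')   # re-open the archive in read mode
G = R["G"]                            # impurity Green's function
Gl = R["Gl"]                          # Legendre coefficients
Nimp = R["Nimp"]                      # impurity occupation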
Пример #29
0
    def partial_charges(self):
        """Calculates the orbitally-resolved density matrix for all the orbitals considered in the input.
           The theta-projectors are used, hence case.parproj data is necessary"""
           

        #thingstoread = ['Dens_Mat_below','N_parproj','Proj_Mat_pc','rotmat_all']
        #retval = self.read_input_from_HDF(SubGrp=self.ParProjdata,thingstoread=thingstoread)
        retval = self.read_ParProj_input_from_HDF()
        if not retval: return retval
        if self.symm_op: self.Symm_par = Symmetry(self.HDFfile,subgroup=self.Symmpardata)
        
        # Density matrix in the window
        bln = self.blocnames[self.SO]
        ntoi = self.names_to_ind[self.SO]
        self.Dens_Mat_window = [ [numpy.zeros([self.shells[ish][3],self.shells[ish][3]],numpy.complex_) for ish in range(self.N_shells)]   
                                 for isp in range(len(bln)) ]    # init the density matrix

        mu = self.Chemical_Potential
        GFStruct_proj = [ [ (al, range(self.shells[i][3])) for al in bln ]  for i in xrange(self.N_shells) ]
        if hasattr(self,"Sigmaimp"):
            Gproj = [GF(Name_Block_Generator = [ (a,GFBloc_ImFreq(Indices = al, Mesh = self.Sigmaimp[0].mesh)) for a,al in GFStruct_proj[ish] ], Copy = False)
                     for ish in xrange(self.N_shells)]
        else:
            Gproj = [GF(Name_Block_Generator = [ (a,GFBloc_ImFreq(Indices = al, Beta = 40)) for a,al in GFStruct_proj[ish] ], Copy = False)
                     for ish in xrange(self.N_shells)]

        for ish in xrange(self.N_shells): Gproj[ish].zero()

        ikarray=numpy.array(range(self.Nk))
        #print MPI.rank, MPI.slice_array(ikarray)
        #print "K-Sum starts on node",MPI.rank," at ",datetime.now()
        
        for ik in MPI.slice_array(ikarray):
            #print MPI.rank, ik, datetime.now()
            S = self.latticeGF_Matsubara(ik=ik,mu=mu)
            S *= self.BZ_weights[ik]

            for ish in xrange(self.N_shells):
                tmp = Gproj[ish].copy()
                for ir in xrange(self.N_parproj[ish]):
                    for sig,gf in tmp: tmp[sig] <<= self.downfold_pc(ik,ir,ish,sig,S[sig],gf)
                    Gproj[ish] += tmp
        
        #print "K-Sum done on node",MPI.rank," at ",datetime.now()
        #collect data from MPI:
        for ish in xrange(self.N_shells):
            Gproj[ish] <<= MPI.all_reduce(MPI.world,Gproj[ish],lambda x,y : x+y)
        MPI.barrier()

        #print "Data collected on node",MPI.rank," at ",datetime.now()

        # Symmetrisation:
        if (self.symm_op!=0): Gproj = self.Symm_par.symmetrise(Gproj)
        #print "Symmetrisation done on node",MPI.rank," at ",datetime.now()
        
        for ish in xrange(self.N_shells):

            # Rotation to local:
            if (self.use_rotations):
                for sig,gf in Gproj[ish]: Gproj[ish][sig] <<= self.rotloc_all(ish,gf,direction='toLocal')

            isp = 0
            for sig,gf in Gproj[ish]: #dmg.append(Gproj[ish].density()[sig])
                self.Dens_Mat_window[isp][ish] = Gproj[ish].density()[sig]
                isp+=1
       
        # add Density matrices to get the total:
        Dens_Mat = [ [ self.Dens_Mat_below[ntoi[bln[isp]]][ish]+self.Dens_Mat_window[isp][ish] for ish in range(self.N_shells)]
                     for isp in range(len(bln)) ]

        return Dens_Mat
Пример #30
0
    def convert_Parproj_input(self,
                              ParProjSubGrp='SumK_LDA_ParProj',
                              SymmParSubGrp='SymmPar'):
        """
        Reads the input for the partial charges projectors from case.parproj, and stores it in the SymmParSubGrp
        group in the HDF5.
        """

        if not (MPI.IS_MASTER_NODE()): return

        self.ParProjSubGrp = ParProjSubGrp
        self.SymmParSubGrp = SymmParSubGrp

        MPI.report("Reading parproj input from %s..." % self.Parproj_file)

        Dens_Mat_below = [[
            numpy.zeros([self.shells[ish][3], self.shells[ish][3]],
                        numpy.complex_) for ish in range(self.N_shells)
        ] for isp in range(self.Nspinblocs)]

        R = Read_Fortran_File(self.Parproj_file)
        #try:

        N_parproj = [int(R.next()) for i in range(self.N_shells)]

        # Initialise P, here a double list of matrices:
        Proj_Mat_pc = [[[[
            numpy.zeros([self.shells[ish][3], self.N_Orbitals[ik][isp]],
                        numpy.complex_) for ir in range(N_parproj[ish])
        ] for ish in range(self.N_shells)] for isp in range(self.Nspinblocs)]
                       for ik in range(self.Nk)]

        rotmat_all = [
            numpy.identity(self.shells[ish][3], numpy.complex_)
            for ish in xrange(self.N_shells)
        ]
        rotmat_all_timeinv = [0 for i in range(self.N_shells)]

        for ish in range(self.N_shells):
            #print ish
            # read first the projectors for this orbital:
            for ik in xrange(self.Nk):
                for ir in range(N_parproj[ish]):
                    for isp in range(self.Nspinblocs):

                        for i in xrange(
                                self.shells[ish][3]):  # read real part:
                            for j in xrange(self.N_Orbitals[ik][isp]):
                                Proj_Mat_pc[ik][isp][ish][ir][i, j] = R.next()

                    for isp in range(self.Nspinblocs):
                        for i in xrange(
                                self.shells[ish][3]):  # read imaginary part:
                            for j in xrange(self.N_Orbitals[ik][isp]):
                                Proj_Mat_pc[ik][isp][ish][ir][
                                    i, j] += 1j * R.next()

            # now read the Density Matrix for this orbital below the energy window:
            for isp in range(self.Nspinblocs):
                for i in xrange(self.shells[ish][3]):  # read real part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i, j] = R.next()
            for isp in range(self.Nspinblocs):
                for i in xrange(self.shells[ish][3]):  # read imaginary part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i, j] += 1j * R.next()
                if (self.SP == 0): Dens_Mat_below[isp][ish] /= 2.0

            # Global -> local rotation matrix for this shell:
            for i in xrange(self.shells[ish][3]):  # read real part:
                for j in xrange(self.shells[ish][3]):
                    rotmat_all[ish][i, j] = R.next()
            for i in xrange(self.shells[ish][3]):  # read imaginary part:
                for j in xrange(self.shells[ish][3]):
                    rotmat_all[ish][i, j] += 1j * R.next()

            #print Dens_Mat_below[0][ish],Dens_Mat_below[1][ish]

            if (self.SP):
                rotmat_all_timeinv[ish] = int(R.next())

        #except StopIteration : # a more explicit error if the file is corrupted.
        #    raise "SumK_LDA_Wien2k_input: reading file for Projectors failed!"
        R.close()

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDF_Archive(self.HDFfile, 'a')
        if not (self.ParProjSubGrp in ar): ar.create_group(self.ParProjSubGrp)
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        thingstowrite = [
            'Dens_Mat_below', 'N_parproj', 'Proj_Mat_pc', 'rotmat_all',
            'rotmat_all_timeinv'
        ]
        for it in thingstowrite:
            exec "ar['%s']['%s'] = %s" % (self.ParProjSubGrp, it, it)
        del ar

        # Symmetries are used,
        # Now do the symmetries for all orbitals:
        self.read_Symmetry_input(orbits=self.shells,
                                 symmfile=self.Symmpar_file,
                                 SymmSubGrp=self.SymmParSubGrp,
                                 SO=self.SO,
                                 SP=self.SP)
Пример #31
0
    def partial_charges(self):
        """Calculates the orbitally-resolved density matrix for all the orbitals considered in the input.
           The theta-projectors are used, hence case.parproj data is necessary"""

        #thingstoread = ['Dens_Mat_below','N_parproj','Proj_Mat_pc','rotmat_all']
        #retval = self.read_input_from_HDF(SubGrp=self.ParProjdata,thingstoread=thingstoread)
        retval = self.read_ParProj_input_from_HDF()
        if not retval: return retval
        if self.symm_op:
            self.Symm_par = Symmetry(self.HDFfile, subgroup=self.Symmpardata)

        # Density matrix in the window
        bln = self.blocnames[self.SO]
        ntoi = self.names_to_ind[self.SO]
        self.Dens_Mat_window = [[
            numpy.zeros([self.shells[ish][3], self.shells[ish][3]],
                        numpy.complex_) for ish in range(self.N_shells)
        ] for isp in range(len(bln))]  # init the density matrix

        mu = self.Chemical_Potential
        GFStruct_proj = [[(al, range(self.shells[i][3])) for al in bln]
                         for i in xrange(self.N_shells)]
        if hasattr(self, "Sigmaimp"):
            Gproj = [
                GF(Name_Block_Generator=[
                    (a, GFBloc_ImFreq(Indices=al, Mesh=self.Sigmaimp[0].mesh))
                    for a, al in GFStruct_proj[ish]
                ],
                   Copy=False) for ish in xrange(self.N_shells)
            ]
        else:
            Gproj = [
                GF(Name_Block_Generator=[(a, GFBloc_ImFreq(Indices=al,
                                                           Beta=40))
                                         for a, al in GFStruct_proj[ish]],
                   Copy=False) for ish in xrange(self.N_shells)
            ]

        for ish in xrange(self.N_shells):
            Gproj[ish].zero()

        ikarray = numpy.array(range(self.Nk))
        #print MPI.rank, MPI.slice_array(ikarray)
        #print "K-Sum starts on node",MPI.rank," at ",datetime.now()

        for ik in MPI.slice_array(ikarray):
            #print MPI.rank, ik, datetime.now()
            S = self.latticeGF_Matsubara(ik=ik, mu=mu)
            S *= self.BZ_weights[ik]

            for ish in xrange(self.N_shells):
                tmp = Gproj[ish].copy()
                for ir in xrange(self.N_parproj[ish]):
                    for sig, gf in tmp:
                        tmp[sig] <<= self.downfold_pc(ik, ir, ish, sig, S[sig],
                                                      gf)
                    Gproj[ish] += tmp

        #print "K-Sum done on node",MPI.rank," at ",datetime.now()
        #collect data from MPI:
        for ish in xrange(self.N_shells):
            Gproj[ish] <<= MPI.all_reduce(MPI.world, Gproj[ish],
                                          lambda x, y: x + y)
        MPI.barrier()

        #print "Data collected on node",MPI.rank," at ",datetime.now()

        # Symmetrisation:
        if (self.symm_op != 0): Gproj = self.Symm_par.symmetrise(Gproj)
        #print "Symmetrisation done on node",MPI.rank," at ",datetime.now()

        for ish in xrange(self.N_shells):

            # Rotation to local:
            if (self.use_rotations):
                for sig, gf in Gproj[ish]:
                    Gproj[ish][sig] <<= self.rotloc_all(ish,
                                                        gf,
                                                        direction='toLocal')

            isp = 0
            for sig, gf in Gproj[ish]:  #dmg.append(Gproj[ish].density()[sig])
                self.Dens_Mat_window[isp][ish] = Gproj[ish].density()[sig]
                isp += 1

        # add Density matrices to get the total:
        Dens_Mat = [[
            self.Dens_Mat_below[ntoi[bln[isp]]][ish] +
            self.Dens_Mat_window[isp][ish] for ish in range(self.N_shells)
        ] for isp in range(len(bln))]

        return Dens_Mat
Пример #32
0
    def spaghettis(self,
                   broadening,
                   shift=0.0,
                   plotrange=None,
                   ishell=None,
                   invertAkw=False,
                   Fermisurface=False):
        """ Calculates the correlated band structure with a real-frequency self-energy.
            ATTENTION: Many things from the original input file are overwritten!!!"""

        assert hasattr(self, "Sigmaimp"), "Set Sigma First!!"
        thingstoread = [
            'Nk', 'N_Orbitals', 'Proj_Mat', 'Hopping', 'N_parproj',
            'Proj_Mat_pc'
        ]
        retval = self.read_input_from_HDF(SubGrp=self.Bandsdata,
                                          thingstoread=thingstoread)
        if not retval: return retval

        if Fermisurface: ishell = None

        # print hamiltonian for checks:
        if ((self.SP == 1) and (self.SO == 0)):
            f1 = open('hamup.dat', 'w')
            f2 = open('hamdn.dat', 'w')

            for ik in xrange(self.Nk):
                for i in xrange(self.N_Orbitals[ik][0]):
                    f1.write('%s    %s\n' %
                             (ik, self.Hopping[ik][0][i, i].real))
                for i in xrange(self.N_Orbitals[ik][1]):
                    f2.write('%s    %s\n' %
                             (ik, self.Hopping[ik][1][i, i].real))
                f1.write('\n')
                f2.write('\n')
            f1.close()
            f2.close()
        else:
            f = open('ham.dat', 'w')
            for ik in xrange(self.Nk):
                for i in xrange(self.N_Orbitals[ik][0]):
                    f.write('%s    %s\n' %
                            (ik, self.Hopping[ik][0][i, i].real))
                f.write('\n')
            f.close()

        #=========================================
        # calculate A(k,w):

        mu = self.Chemical_Potential
        bln = self.blocnames[self.SO]

        # init DOS:
        M = [x for x in self.Sigmaimp[0].mesh]
        N_om = len(M)

        if plotrange is None:
            omminplot = M[0] - 0.001
            ommaxplot = M[N_om - 1] + 0.001
        else:
            omminplot = plotrange[0]
            ommaxplot = plotrange[1]

        if (ishell is None):
            Akw = {}
            for ibn in bln:
                Akw[ibn] = numpy.zeros([self.Nk, N_om], numpy.float_)
        else:
            Akw = {}
            for ibn in bln:
                Akw[ibn] = numpy.zeros([self.shells[ishell][3], self.Nk, N_om],
                                       numpy.float_)

        if Fermisurface:
            omminplot = -2.0 * broadening
            ommaxplot = 2.0 * broadening
            Akw = {}
            for ibn in bln:
                Akw[ibn] = numpy.zeros([self.Nk, 1], numpy.float_)

        if not (ishell is None):
            GFStruct_proj = [(al, range(self.shells[ishell][3])) for al in bln]
            Gproj = GF(Name_Block_Generator=[
                (a, GFBloc_ReFreq(Indices=al, Mesh=self.Sigmaimp[0].mesh))
                for a, al in GFStruct_proj
            ],
                       Copy=False)
            Gproj.zero()

        for ik in xrange(self.Nk):

            S = self.latticeGF_realfreq(ik=ik, mu=mu, broadening=broadening)
            if (ishell is None):
                # non-projected A(k,w)
                for iom in range(N_om):
                    if (M[iom] > omminplot) and (M[iom] < ommaxplot):
                        if Fermisurface:
                            for sig, gf in S:
                                Akw[sig][
                                    ik,
                                    0] += gf._data.array[:, :, iom].imag.trace(
                                    ) / (-3.1415926535) * (M[1] - M[0])
                        else:
                            for sig, gf in S:
                                Akw[sig][
                                    ik,
                                    iom] += gf._data.array[:, :,
                                                           iom].imag.trace(
                                                           ) / (-3.1415926535)
                            Akw[sig][
                                ik,
                                iom] += ik * shift  # shift Akw for plotting in xmgrace

            else:
                # projected A(k,w):
                Gproj.zero()
                tmp = Gproj.copy()
                for ir in xrange(self.N_parproj[ishell]):
                    for sig, gf in tmp:
                        tmp[sig] <<= self.downfold_pc(ik, ir, ishell, sig,
                                                      S[sig], gf)
                    Gproj += tmp

                # TO BE FIXED:
                # rotate to local frame
                #if (self.use_rotations):
                #    for sig,gf in Gproj: Gproj[sig] <<= self.rotloc(0,gf,direction='toLocal')

                for iom in range(N_om):
                    if (M[iom] > omminplot) and (M[iom] < ommaxplot):
                        for ish in range(self.shells[ishell][3]):
                            for ibn in bln:
                                Akw[ibn][ish, ik,
                                         iom] = Gproj[ibn]._data.array[
                                             ish, ish,
                                             iom].imag / (-3.1415926535)

        # END k-LOOP
        if (MPI.IS_MASTER_NODE()):
            if (ishell is None):

                for ibn in bln:
                    # loop over GF blocs:

                    if (invertAkw):
                        maxAkw = Akw[ibn].max()
                        minAkw = Akw[ibn].min()

                    # open file for storage:
                    if Fermisurface:
                        f = open('FS_' + ibn + '.dat', 'w')
                    else:
                        f = open('Akw_' + ibn + '.dat', 'w')

                    for ik in range(self.Nk):
                        if Fermisurface:
                            if (invertAkw):
                                Akw[ibn][ik, 0] = 1.0 / (minAkw - maxAkw) * (
                                    Akw[ibn][ik, 0] - maxAkw)
                            f.write('%s    %s\n' % (ik, Akw[ibn][ik, 0]))
                        else:
                            for iom in range(N_om):
                                if (M[iom] > omminplot) and (M[iom] <
                                                             ommaxplot):
                                    if (invertAkw):
                                        Akw[ibn][
                                            ik,
                                            iom] = 1.0 / (minAkw - maxAkw) * (
                                                Akw[ibn][ik, iom] - maxAkw)
                                    if (shift > 0.0001):
                                        f.write('%s      %s\n' %
                                                (M[iom], Akw[ibn][ik, iom]))
                                    else:
                                        f.write(
                                            '%s     %s      %s\n' %
                                            (ik, M[iom], Akw[ibn][ik, iom]))

                            f.write('\n')

                    f.close()

            else:
                for ibn in bln:
                    for ish in range(self.shells[ishell][3]):

                        if (invertAkw):
                            maxAkw = Akw[ibn][ish, :, :].max()
                            minAkw = Akw[ibn][ish, :, :].min()

                        f = open('Akw_' + ibn + '_proj' + str(ish) + '.dat',
                                 'w')

                        for ik in range(self.Nk):
                            for iom in range(N_om):
                                if (M[iom] > omminplot) and (M[iom] <
                                                             ommaxplot):
                                    if (invertAkw):
                                        Akw[ibn][ish, ik, iom] = 1.0 / (
                                            minAkw - maxAkw
                                        ) * (Akw[ibn][ish, ik, iom] - maxAkw)
                                    if (shift > 0.0001):
                                        f.write(
                                            '%s      %s\n' %
                                            (M[iom], Akw[ibn][ish, ik, iom]))
                                    else:
                                        f.write('%s     %s      %s\n' %
                                                (ik, M[iom], Akw[ibn][ish, ik,
                                                                      iom]))

                            f.write('\n')

                        f.close()
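
When invertAkw is set, spaghettis() rescales the spectral function before writing it, mapping its maximum to 0 and its minimum to 1 so that the colour scale is inverted for plotting in xmgrace. A compact sketch of exactly that rescaling (the function name is illustrative):

def invert_akw(akw):
    """Rescale a numpy array A(k,w) to (A - max) / (min - max):
    the maximum becomes 0 and the minimum becomes 1 (sketch)."""
    lo, hi = akw.min(), akw.max()
    return (akw - hi) / (lo - hi)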
Пример #33
0
    def convert_Parproj_input(self,ParProjSubGrp='SumK_LDA_ParProj',SymmParSubGrp='SymmPar'):
        """
        Reads the input for the partial charges projectors from case.parproj, and stores it in the SymmParSubGrp
        group in the HDF5.
        """

        if not (MPI.IS_MASTER_NODE()): return

        self.ParProjSubGrp = ParProjSubGrp
        self.SymmParSubGrp = SymmParSubGrp

        MPI.report("Reading parproj input from %s..."%self.Parproj_file)

        Dens_Mat_below = [ [numpy.zeros([self.shells[ish][3],self.shells[ish][3]],numpy.complex_) for ish in range(self.N_shells)] 
                           for isp in range(self.Nspinblocs) ]

        R = Read_Fortran_File(self.Parproj_file)
        #try:

        N_parproj = [int(R.next()) for i in range(self.N_shells)]
                
        # Initialise P, here a double list of matrices:
        Proj_Mat_pc = [ [ [ [numpy.zeros([self.shells[ish][3], self.N_Orbitals[ik][isp]], numpy.complex_) 
                             for ir in range(N_parproj[ish])]
                            for ish in range (self.N_shells) ]
                          for isp in range(self.Nspinblocs) ]
                        for ik in range(self.Nk) ]

        rotmat_all = [numpy.identity(self.shells[ish][3],numpy.complex_) for ish in xrange(self.N_shells)]
        rotmat_all_timeinv = [0 for i in range(self.N_shells)]

        for ish in range(self.N_shells):
            #print ish   
            # read first the projectors for this orbital:
            for ik in xrange(self.Nk):
                for ir in range(N_parproj[ish]):
                    for isp in range(self.Nspinblocs):
                                    
                        for i in xrange(self.shells[ish][3]):    # read real part:
                            for j in xrange(self.N_Orbitals[ik][isp]):
                                Proj_Mat_pc[ik][isp][ish][ir][i,j] = R.next()
                            
                    for isp in range(self.Nspinblocs):
                        for i in xrange(self.shells[ish][3]):    # read imaginary part:
                            for j in xrange(self.N_Orbitals[ik][isp]):
                                Proj_Mat_pc[ik][isp][ish][ir][i,j] += 1j * R.next()
                                        
                    
            # now read the Density Matrix for this orbital below the energy window:
            for isp in range(self.Nspinblocs):
                for i in xrange(self.shells[ish][3]):    # read real part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i,j] = R.next()
            for isp in range(self.Nspinblocs):
                for i in xrange(self.shells[ish][3]):    # read imaginary part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i,j] += 1j * R.next()
                if (self.SP==0): Dens_Mat_below[isp][ish] /= 2.0

            # Global -> local rotation matrix for this shell:
            for i in xrange(self.shells[ish][3]):    # read real part:
                for j in xrange(self.shells[ish][3]):
                    rotmat_all[ish][i,j] = R.next()
            for i in xrange(self.shells[ish][3]):    # read imaginary part:
                for j in xrange(self.shells[ish][3]):
                    rotmat_all[ish][i,j] += 1j * R.next()
                    
            #print Dens_Mat_below[0][ish],Dens_Mat_below[1][ish]
            
            if (self.SP):
                rotmat_all_timeinv[ish] = int(R.next())

        #except StopIteration : # a more explicit error if the file is corrupted.
        #    raise "SumK_LDA_Wien2k_input: reading file for Projectors failed!"
        R.close()

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDF_Archive(self.HDFfile,'a')
        if not (self.ParProjSubGrp in ar): ar.create_group(self.ParProjSubGrp) 
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        thingstowrite = ['Dens_Mat_below','N_parproj','Proj_Mat_pc','rotmat_all','rotmat_all_timeinv']
        for it in thingstowrite: exec "ar['%s']['%s'] = %s"%(self.ParProjSubGrp,it,it)
        del ar

        # Symmetries are used, 
        # Now do the symmetries for all orbitals:
        self.read_Symmetry_input(orbits=self.shells,symmfile=self.Symmpar_file,SymmSubGrp=self.SymmParSubGrp,SO=self.SO,SP=self.SP)
Пример #34
0
    def convert_DMFT_input(self):
        """
        Reads the input files, and stores the data in the HDFfile
        """

        if not (MPI.IS_MASTER_NODE()): return  # do it only on master:
        MPI.report("Reading input from %s..." % self.LDA_file)

        # Read and write only on Master!!!
        # R is a generator : each R.next() will return the next number in the file
        R = Read_Fortran_File(self.LDA_file)
        try:
            EnergyUnit = R.next()  # read the energy conversion factor
            Nk = int(R.next())  # read the number of k points
            k_dep_projection = 1
            SP = int(R.next())  # flag for spin-polarised calculation
            SO = int(R.next())  # flag for spin-orbit calculation
            charge_below = R.next()  # total charge below energy window
            Density_Required = R.next(
            )  # total density required, for setting the chemical potential
            symm_op = 1  # Use symmetry groups for the k-sum

            # the information on the non-correlated shells is not important here, maybe skip:
            N_shells = int(R.next(
            ))  # number of shells (e.g. Fe d, As p, O p) in the unit cell,
            # corresponds to index R in formulas
            # now read the information about the shells:
            shells = [[int(R.next()) for i in range(4)]
                      for icrsh in range(N_shells)
                      ]  # reads iatom, sort, l, dim

            N_corr_shells = int(R.next(
            ))  # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
            # corresponds to index R in formulas
            # now read the information about the shells:
            corr_shells = [[int(R.next()) for i in range(6)]
                           for icrsh in range(N_corr_shells)
                           ]  # reads iatom, sort, l, dim, SO flag, irep

            self.inequiv_shells(
                corr_shells
            )  # determine the number of inequivalent correlated shells, has to be known for further reading...

            use_rotations = 1
            rotmat = [
                numpy.identity(corr_shells[icrsh][3], numpy.complex_)
                for icrsh in xrange(N_corr_shells)
            ]

            # read the matrices
            rotmat_timeinv = [0 for i in range(N_corr_shells)]

            for icrsh in xrange(N_corr_shells):
                for i in xrange(corr_shells[icrsh][3]):  # read real part:
                    for j in xrange(corr_shells[icrsh][3]):
                        rotmat[icrsh][i, j] = R.next()
                for i in xrange(corr_shells[icrsh][3]):  # read imaginary part:
                    for j in xrange(corr_shells[icrsh][3]):
                        rotmat[icrsh][i, j] += 1j * R.next()

                if (SP == 1):  # read time inversion flag:
                    rotmat_timeinv[icrsh] = int(R.next())

            # Read here the infos for the transformation of the basis:
            Nreps = [1 for i in range(self.N_inequiv_corr_shells)]
            dim_reps = [0 for i in range(self.N_inequiv_corr_shells)]
            T = []
            for icrsh in range(self.N_inequiv_corr_shells):
                Nreps[icrsh] = int(R.next(
                ))  # number of representatives ("subsets"), e.g. t2g and eg
                dim_reps[icrsh] = [int(R.next()) for i in range(Nreps[icrsh])
                                   ]  # dimensions of the subsets

                # The transformation matrix:
                # it is of dimension 2l+1, if no SO, and 2*(2l+1) with SO!!
                #T = []
                #for ish in xrange(self.N_inequiv_corr_shells):
                ll = 2 * corr_shells[self.invshellmap[icrsh]][2] + 1
                lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1)
                T.append(numpy.zeros([lmax, lmax], numpy.complex_))

                # now read it from file:
                for i in xrange(lmax):
                    for j in xrange(lmax):
                        T[icrsh][i, j] = R.next()
                for i in xrange(lmax):
                    for j in xrange(lmax):
                        T[icrsh][i, j] += 1j * R.next()

            # Spin blocks to be read:
            Nspinblocs = SP + 1 - SO  # number of spins to read for Norbs and Ham, NOT Projectors

            # read the list of N_Orbitals for all k points
            N_Orbitals = [[0 for isp in range(Nspinblocs)]
                          for ik in xrange(Nk)]
            for isp in range(Nspinblocs):
                for ik in xrange(Nk):
                    N_Orbitals[ik][isp] = int(R.next())
            #print N_Orbitals

            # Initialise the projectors:
            Proj_Mat = [[[
                numpy.zeros([corr_shells[icrsh][3], N_Orbitals[ik][isp]],
                            numpy.complex_) for icrsh in range(N_corr_shells)
            ] for isp in range(Nspinblocs)] for ik in range(Nk)]

            # Read the projectors from the file:
            for ik in xrange(Nk):
                for icrsh in range(N_corr_shells):
                    no = corr_shells[icrsh][3]
                    # first Real part for BOTH spins, due to conventions in dmftproj:
                    for isp in range(Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i, j] = R.next()
                    # now Imag part:
                    for isp in range(Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i, j] += 1j * R.next()

            # now define the arrays for weights and hopping ...
            BZ_weights = numpy.ones([Nk], numpy.float_) / float(
                Nk)  # w(k_index),  default normalisation
            Hopping = [[
                numpy.zeros([N_Orbitals[ik][isp], N_Orbitals[ik][isp]],
                            numpy.complex_) for isp in range(Nspinblocs)
            ] for ik in xrange(Nk)]

            # weights in the file
            for ik in xrange(Nk):
                BZ_weights[ik] = R.next()

            # if the sum over spins is in the weights, take it out again!!
            sm = sum(BZ_weights)
            BZ_weights[:] /= sm

            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
            for isp in range(Nspinblocs):
                for ik in xrange(Nk):
                    no = N_Orbitals[ik][isp]
                    for i in xrange(no):
                        Hopping[ik][isp][i, i] = R.next() * EnergyUnit

            #keep some things that we need for reading parproj:
            self.N_shells = N_shells
            self.shells = shells
            self.N_corr_shells = N_corr_shells
            self.corr_shells = corr_shells
            self.Nspinblocs = Nspinblocs
            self.N_Orbitals = N_Orbitals
            self.Nk = Nk
            self.SO = SO
            self.SP = SP
            self.EnergyUnit = EnergyUnit
        except StopIteration:  # a more explicit error if the file is corrupted.
            raise IOError, "SumK_LDA : reading file HMLT_file failed!"

        R.close()

        #print Proj_Mat[0]

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDF_Archive(self.HDFfile, 'a')
        if not (self.LDASubGrp in ar): ar.create_group(self.LDASubGrp)
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!

        ar[self.LDASubGrp]['EnergyUnit'] = EnergyUnit
        ar[self.LDASubGrp]['Nk'] = Nk
        ar[self.LDASubGrp]['k_dep_projection'] = k_dep_projection
        ar[self.LDASubGrp]['SP'] = SP
        ar[self.LDASubGrp]['SO'] = SO
        ar[self.LDASubGrp]['charge_below'] = charge_below
        ar[self.LDASubGrp]['Density_Required'] = Density_Required
        ar[self.LDASubGrp]['symm_op'] = symm_op
        ar[self.LDASubGrp]['N_shells'] = N_shells
        ar[self.LDASubGrp]['shells'] = shells
        ar[self.LDASubGrp]['N_corr_shells'] = N_corr_shells
        ar[self.LDASubGrp]['corr_shells'] = corr_shells
        ar[self.LDASubGrp]['use_rotations'] = use_rotations
        ar[self.LDASubGrp]['rotmat'] = rotmat
        ar[self.LDASubGrp]['rotmat_timeinv'] = rotmat_timeinv
        ar[self.LDASubGrp]['Nreps'] = Nreps
        ar[self.LDASubGrp]['dim_reps'] = dim_reps
        ar[self.LDASubGrp]['T'] = T
        ar[self.LDASubGrp]['N_Orbitals'] = N_Orbitals
        ar[self.LDASubGrp]['Proj_Mat'] = Proj_Mat
        ar[self.LDASubGrp]['BZ_weights'] = BZ_weights
        ar[self.LDASubGrp]['Hopping'] = Hopping

        del ar

        # Symmetries are used,
        # Now do the symmetries for correlated orbitals:
        self.read_Symmetry_input(orbits=corr_shells,
                                 symmfile=self.Symm_file,
                                 SymmSubGrp=self.SymmSubGrp,
                                 SO=SO,
                                 SP=SP)
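
All of these converters walk through the file with Read_Fortran_File, described in the comment above as a generator whose next() call returns the next number in the file. Below is a minimal sketch of a reader with that behaviour; the actual implementation used by these snippets may differ in detail (e.g. error handling of missing files).

def read_fortran_file(filename):
    """Yield every whitespace-separated number in the file as a float,
    translating Fortran 'D'/'d' exponents to 'E' first (sketch)."""
    with open(filename, 'r') as f:
        for line in f:
            for token in line.replace('D', 'E').replace('d', 'e').split():
                yield float(token)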
Пример #35
0
    def convert_bands_input(self,BandsSubGrp = 'SumK_LDA_Bands'):
        """
        Converts the input for momentum resolved spectral functions, and stores it in BandsSubGrp in the
        HDF5.
        """

        if not (MPI.IS_MASTER_NODE()): return

        self.BandsSubGrp = BandsSubGrp
        MPI.report("Reading bands input from %s..."%self.Band_file)

        R = Read_Fortran_File(self.Band_file)
        try:
            Nk = int(R.next())

            # read the list of N_Orbitals for all k points
            N_Orbitals = [ [0 for isp in range(self.Nspinblocs)] for ik in xrange(Nk)]
            for isp in range(self.Nspinblocs):
                for ik in xrange(Nk):
                    N_Orbitals[ik][isp] = int(R.next())

            # Initialise the projectors:
            Proj_Mat = [ [ [numpy.zeros([self.corr_shells[icrsh][3], N_Orbitals[ik][isp]], numpy.complex_) 
                            for icrsh in range (self.N_corr_shells)] 
                           for isp in range(self.Nspinblocs)] 
                         for ik in range(Nk) ]

            # Read the projectors from the file:
            for ik in xrange(Nk):
                for icrsh in range(self.N_corr_shells):
                    no = self.corr_shells[icrsh][3]
                    # first Real part for BOTH spins, due to conventions in dmftproj:
                    for isp in range(self.Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i,j] = R.next()
                    # now Imag part:
                    for isp in range(self.Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i,j] += 1j * R.next()

            Hopping = [ [numpy.zeros([N_Orbitals[ik][isp],N_Orbitals[ik][isp]],numpy.complex_) 
                         for isp in range(self.Nspinblocs)] for ik in xrange(Nk) ]
         	    
            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
            for isp in range(self.Nspinblocs):
                for ik in xrange(Nk) :
                    no = N_Orbitals[ik][isp]
                    for i in xrange(no):
                        Hopping[ik][isp][i,i] = R.next() * self.EnergyUnit

            # now read the partial projectors:
            N_parproj = [int(R.next()) for i in range(self.N_shells)]
            # Initialise P, here a double list of matrices:
            Proj_Mat_pc = [ [ [ [numpy.zeros([self.shells[ish][3], N_Orbitals[ik][isp]], numpy.complex_) 
                                 for ir in range(N_parproj[ish])]
                                for ish in range (self.N_shells) ]
                              for isp in range(self.Nspinblocs) ]
                            for ik in range(Nk) ]


            for ish in range(self.N_shells):
               
                for ik in xrange(Nk):
                    for ir in range(N_parproj[ish]):
                        for isp in range(self.Nspinblocs):
                                    
                            for i in xrange(self.shells[ish][3]):    # read real part:
                                for j in xrange(N_Orbitals[ik][isp]):
                                    Proj_Mat_pc[ik][isp][ish][ir][i,j] = R.next()
                            
                            for i in xrange(self.shells[ish][3]):    # read imaginary part:
                                for j in xrange(N_Orbitals[ik][isp]):
                                    Proj_Mat_pc[ik][isp][ish][ir][i,j] += 1j * R.next()

        except StopIteration : # a more explicit error if the file is corrupted.
            raise IOError, "SumK_LDA : reading file HMLT_file failed!"

        R.close()
        # reading done!

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDF_Archive(self.HDFfile,'a')
        if not (self.BandsSubGrp in ar): ar.create_group(self.BandsSubGrp) 
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        thingstowrite = ['Nk','N_Orbitals','Proj_Mat','Hopping','N_parproj','Proj_Mat_pc']
        for it in thingstowrite: exec "ar['%s']['%s'] = %s"%(self.BandsSubGrp,it,it)

        #ar[self.BandsSubGrp]['Nk'] = Nk
        #ar[self.BandsSubGrp]['N_Orbitals'] = N_Orbitals
        #ar[self.BandsSubGrp]['Proj_Mat'] = Proj_Mat
        #self.Proj_Mat = Proj_Mat
        #self.N_Orbitals = N_Orbitals
        #self.Nk = Nk
        #self.Hopping = Hopping
        del ar
Пример #36
0
    def SetDoubleCounting(self, densmat, U_interact, J_Hund, orb=0, useDCformula=0, useval=None):
        """Sets the double-counting term for the inequivalent orbital orb.
           useDCformula=0: LDA+U FLL double counting, useDCformula=1: Held's formula,
           useDCformula=2: AMF.
           Be sure that you use the correct interaction Hamiltonian!"""

        # if (not hasattr(self,"dc_imp")): self.__initDC()

        dm = [{} for i in xrange(self.N_corr_shells)]
        for i in xrange(self.N_corr_shells):
            l = self.corr_shells[i][3]  # *(1+self.corr_shells[i][4])
            for j in xrange(len(self.GFStruct_corr[i])):
                dm[i]["%s" % self.GFStruct_corr[i][j][0]] = numpy.zeros([l, l], numpy.float_)

        for icrsh in xrange(self.N_corr_shells):

            iorb = self.shellmap[icrsh]  # iorb is the index of the inequivalent shell corresponding to icrsh

            if iorb == orb:
                # do this orbital

                l = self.corr_shells[icrsh][3]  # *(1+self.corr_shells[icrsh][4])
                for j in xrange(len(self.GFStruct_corr[icrsh])):
                    self.dc_imp[icrsh]["%s" % self.GFStruct_corr[icrsh][j][0]] = numpy.identity(l, numpy.float_)

                # transform the CTQMC blocks to the full matrix:
                for ibl in range(len(self.GFStruct_Solver[iorb])):
                    for i in range(len(self.GFStruct_Solver[iorb][ibl][1])):
                        for j in range(len(self.GFStruct_Solver[iorb][ibl][1])):
                            bl = self.GFStruct_Solver[iorb][ibl][0]
                            ind1 = self.GFStruct_Solver[iorb][ibl][1][i]
                            ind2 = self.GFStruct_Solver[iorb][ibl][1][j]
                            dm[icrsh][self.mapinv[iorb][bl]][ind1, ind2] = densmat[bl][
                                i, j
                            ].real  # only real part relevant for trace

                M = self.corr_shells[icrsh][3]
                Ncr = {}
                Ncrtot = 0.0
                a_list = [a for a, al in self.GFStruct_corr[icrsh]]
                for bl in a_list:
                    Ncr[bl] = dm[icrsh][bl].trace()
                    Ncrtot += Ncr[bl]

                # average the densities if there is no SP:
                if self.SP == 0:
                    for bl in a_list:
                        Ncr[bl] = Ncrtot / len(a_list)
                # correction for SO: we have only one block in this case, but in DC we need N/2
                elif self.SP == 1 and self.SO == 1:
                    for bl in a_list:
                        Ncr[bl] = Ncrtot / 2.0

                if useval is None:

                    if useDCformula == 0:
                        self.DCenerg[icrsh] = U_interact / 2.0 * Ncrtot * (Ncrtot - 1.0)
                        for bl in a_list:
                            Uav = U_interact * (Ncrtot - 0.5) - J_Hund * (Ncr[bl] - 0.5)
                            self.dc_imp[icrsh][bl] *= Uav
                            self.DCenerg[icrsh] -= J_Hund / 2.0 * (Ncr[bl]) * (Ncr[bl] - 1.0)
                            MPI.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f" % locals())
                    elif useDCformula == 1:
                        self.DCenerg[icrsh] = (
                            (U_interact + J_Hund * (2.0 - (M - 1)) / (2 * M - 1)) / 2.0 * Ncrtot * (Ncrtot - 1.0)
                        )
                        for bl in a_list:
                            # Held's formula, with U_interact the interorbital onsite interaction
                            Uav = (U_interact + J_Hund * (2.0 - (M - 1)) / (2 * M - 1)) * (Ncrtot - 0.5)
                            self.dc_imp[icrsh][bl] *= Uav
                            MPI.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f" % locals())
                    elif useDCformula == 2:
                        self.DCenerg[icrsh] = 0.5 * U_interact * Ncrtot * Ncrtot
                        for bl in a_list:
                            # AMF
                            Uav = U_interact * (Ncrtot - Ncr[bl] / M) - J_Hund * (Ncr[bl] - Ncr[bl] / M)
                            self.dc_imp[icrsh][bl] *= Uav
                            self.DCenerg[icrsh] -= (U_interact + (M - 1) * J_Hund) / M * 0.5 * Ncr[bl] * Ncr[bl]
                            MPI.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f" % locals())

                    # output:
                    MPI.report("DC energy for shell %s = %s" % (icrsh, self.DCenerg[icrsh]))

                else:

                    a_list = [a for a, al in self.GFStruct_corr[icrsh]]
                    for bl in a_list:
                        self.dc_imp[icrsh][bl] *= useval

                    self.DCenerg[icrsh] = useval * Ncrtot

                    # output:
                    MPI.report("DC for shell %(icrsh)i = %(useval)f" % locals())
                    MPI.report("DC energy = %s" % self.DCenerg[icrsh])
Example #37
0
    def Solve(self,Iteration_Number=1,Test_Convergence=0.0001):
        """Calculation of the impurity Greens function using Hubbard-I"""

        # Check all the parameters before solving
        print Parameters.check(self.__dict__,self.Required,self.Optional)
        #Solver_Base.Solve(self,is_last_iteration,Iteration_Number,Test_Convergence)
       
        if self.Converged :
            MPI.report("Solver %(Name)s has already converted: SKIPPING"%self.__dict__)
            return

        self.__save_eal('eal.dat',Iteration_Number)

        MPI.report( "Starting Fortran solver %(Name)s"%self.__dict__)

        self.Sigma_Old <<= self.Sigma
        self.G_Old <<= self.G

        # call the fortran solver:
        temp = 1.0/self.Beta
        gf,tail,self.atocc,self.atmag = gf_hi_fullu(e0f=self.ealmat, ur=self.ur, umn=self.umn, ujmn=self.ujmn, 
                                                    zmsb=self.zmsb, nmom=self.Nmoments, ns=self.Nspin, temp=temp, verbosity = self.Verbosity)

        #self.sig = sigma_atomic_fullu(gf=self.gf,e0f=self.eal,zmsb=self.zmsb,ns=self.Nspin,nlm=self.Nlm)

        if (self.Verbosity==0):
            # No fortran output, so give basic results here
            MPI.report("Atomic occupancy in Hubbard I Solver  : %s"%self.atocc)
            MPI.report("Atomic magn. mom. in Hubbard I Solver : %s"%self.atmag)

        # transfer the data to the GF class:
        if (self.UseSpinOrbit): 
            nlmtot = self.Nlm*2         # only one block in this case!
        else:
            nlmtot = self.Nlm

        M={}
        isp=-1
        for a,al in self.GFStruct:
            isp+=1
            #M[a] = gf[isp*self.Nlm:(isp+1)*self.Nlm,isp*self.Nlm:(isp+1)*self.Nlm,:]
            M[a] = gf[isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot,:]
            for i in range(min(self.Nmoments,10)):
                self.tailtempl[a][i+1].array[:] = tail[i][isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot]
                 
        glist = lambda : [ GFBloc_ImFreq(Indices = al, Beta = self.Beta, NFreqMatsubara = self.Nmsb, Data=M[a], Tail=self.tailtempl[a]) 
                           for a,al in self.GFStruct]
        self.G = GF(NameList = self.a_list, BlockList = glist(),Copy=False)
            
        # Self energy:
        self.G0 <<= GF_Initializers.A_Omega_Plus_B(A=1,B=0.0)
        
        M = [ self.ealmat[isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot] for isp in range((2*self.Nlm)/nlmtot) ] 
        self.G0 -= M
        self.Sigma <<= self.G0 - inverse(self.G)

        # invert G0
        self.G0.invert()
       
        def test_distance(G1,G2, dist) :
            def f(G1,G2) : 
                print abs(G1._data.array - G2._data.array)
                dS = max(abs(G1._data.array - G2._data.array).flatten())  
                aS = max(abs(G1._data.array).flatten())
                return dS <= aS*dist
            return reduce(lambda x,y : x and y, [f(g1,g2) for (i1,g1),(i2,g2) in izip(G1,G2)])

        MPI.report("\nChecking Sigma for convergence...\nUsing tolerance %s"%Test_Convergence)
        self.Converged = test_distance(self.Sigma,self.Sigma_Old,Test_Convergence)

        if self.Converged :
            MPI.report("Solver HAS CONVERGED")
        else :
            MPI.report("Solver has not yet converged")
Example #38
0
    def analyse_BS(self, threshold=0.00001, includeshells=None):
        """ Determines the Greens function block structure from the simple point integration"""

        dm = self.simplepointdensmat()

        densmat = [dm[self.invshellmap[ish]] for ish in xrange(self.N_inequiv_corr_shells)]

        if includeshells is None:
            includeshells = range(self.N_inequiv_corr_shells)
        for ish in includeshells:

            # self.GFStruct_Solver.append([])
            self.GFStruct_Solver[ish] = []

            a_list = [a for a, al in self.GFStruct_corr[self.invshellmap[ish]]]
            for a in a_list:

                dm = densmat[ish][a]
                dmbool = abs(dm) > threshold  # boolean matrix marking the entries larger than the threshold

                offdiag = []
                for i in xrange(len(dmbool)):
                    for j in xrange(i, len(dmbool)):
                        if (dmbool[i, j]) & (i != j):
                            offdiag.append([i, j])

                NBlocs = len(dmbool)
                blocs = [[i] for i in range(NBlocs)]

                for i in range(len(offdiag)):
                    if offdiag[i][0] != offdiag[i][1]:
                        for j in range(len(blocs[offdiag[i][1]])):
                            blocs[offdiag[i][0]].append(blocs[offdiag[i][1]][j])
                        del blocs[offdiag[i][1]]
                        for j in range(i + 1, len(offdiag)):
                            if offdiag[j][0] == offdiag[i][1]:
                                offdiag[j][0] = offdiag[i][0]
                            if offdiag[j][1] == offdiag[i][1]:
                                offdiag[j][1] = offdiag[i][0]
                            if offdiag[j][0] > offdiag[i][1]:
                                offdiag[j][0] -= 1
                            if offdiag[j][1] > offdiag[i][1]:
                                offdiag[j][1] -= 1
                            offdiag[j].sort()
                        NBlocs -= 1

                for i in range(NBlocs):
                    blocs[i].sort()
                    self.GFStruct_Solver[ish].append(("%s%s" % (a, i), blocs[i]))

                # map is the mapping of the blocs from the SK blocs to the CTQMC blocs:
                self.map[ish][a] = range(len(dmbool))
                for ibl in range(NBlocs):
                    for j in range(len(blocs[ibl])):
                        self.map[ish][a][blocs[ibl][j]] = "%s%s" % (a, ibl)
                        self.mapinv[ish]["%s%s" % (a, ibl)] = a

            # now calculate degeneracies of orbitals:
            dm = {}
            for bl in self.GFStruct_Solver[ish]:
                bln = bl[0]
                ind = bl[1]
                # get dm for the blocks:
                dm[bln] = numpy.zeros([len(ind), len(ind)], numpy.complex_)
                for i in range(len(ind)):
                    for j in range(len(ind)):
                        dm[bln][i, j] = densmat[ish][self.mapinv[ish][bln]][ind[i], ind[j]]

            for bl in self.GFStruct_Solver[ish]:
                for bl2 in self.GFStruct_Solver[ish]:
                    if dm[bl[0]].shape == dm[bl2[0]].shape:
                        if ((abs(dm[bl[0]] - dm[bl2[0]]) < threshold).all()) and (bl[0] != bl2[0]):
                            # check if it was already there:
                            ind1 = -1
                            ind2 = -2
                            for n, ind in enumerate(self.deg_shells[ish]):
                                if bl[0] in ind:
                                    ind1 = n
                                if bl2[0] in ind:
                                    ind2 = n
                            if (ind1 < 0) and (ind2 >= 0):
                                self.deg_shells[ish][ind2].append(bl[0])
                            elif (ind1 >= 0) and (ind2 < 0):
                                self.deg_shells[ish][ind1].append(bl2[0])
                            elif (ind1 < 0) and (ind2 < 0):
                                self.deg_shells[ish].append([bl[0], bl2[0]])

        if MPI.IS_MASTER_NODE():
            ar = HDF_Archive(self.HDFfile, "a")
            ar[self.LDAdata]["GFStruct_Solver"] = self.GFStruct_Solver
            ar[self.LDAdata]["map"] = self.map
            ar[self.LDAdata]["mapinv"] = self.mapinv
            try:
                ar[self.LDAdata]["deg_shells"] = self.deg_shells
            except:
                MPI.report("deg_shells not stored, degeneracies not found")
            del ar

        return densmat
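analyse_BS merges any two orbitals coupled by an off-diagonal density-matrix element above the threshold into the same solver block; the offdiag bookkeeping above is essentially a connected-component search. A minimal standalone sketch of that grouping for a single density matrix (illustrative, not the class method):

import numpy

def find_blocks(dm, threshold=1e-5):
    """Group orbital indices into blocks connected by |dm[i, j]| > threshold."""
    coupled = numpy.abs(dm) > threshold
    n = len(dm)
    blocks = [{i} for i in range(n)]
    for i in range(n):
        for j in range(i + 1, n):
            if coupled[i, j]:
                bi = next(b for b in blocks if i in b)
                bj = next(b for b in blocks if j in b)
                if bi is not bj:
                    bi |= bj          # merge the two blocks
                    blocks.remove(bj)
    return [sorted(b) for b in blocks]

# Example: orbitals 0 and 2 hybridize, orbital 1 stays alone
dm = numpy.array([[1.0, 0.0, 0.1],
                  [0.0, 0.8, 0.0],
                  [0.1, 0.0, 0.5]])
print(find_blocks(dm))   # -> [[0, 2], [1]]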
Example #39
0
    def __call__ (self, Sigma, mu=0, eta = 0, Field = None, Epsilon_Hat=None, Res = None, SelectedBlocks = ()):
	""" 
        - Computes :
           Res <- \[ \sum_k w_k (\omega + \mu - Field - t(k) - Sigma(k,\omega))^{-1} \]
           If Res is None, a new GF containing the result is returned.
           Otherwise, Res must be a GF, into which the calculation is written and which is then returned
           (this allows chained calculation : SK(mu = mu, Sigma = Sigma, Res = G).total_density(),
           which computes the k-sum into G and returns the density of G).
  
        - Sigma can be an X, a function k -> X, or a function (k, eps) -> X, where :
            - k is a 1d numpy array of floats of size self.dim,
              containing the k vector in the basis of the RBZ (i.e. -0.5 < k_i < 0.5)
            - eps is t(k)
            - X is anything such that X[BlockName] can be added/subtracted to a GFBloc, for BlockName in SelectedBlocks.
              e.g. X can be a GF (with at least the SelectedBlocks), or a dictionary BlockName -> array,
              provided the array has the same dimensions as the GF blocks (for example to add a static Sigma).

        - Field : any k-independent object to be added to the GF

        - Epsilon_Hat : a function of eps_k returning a matrix with the dimensions of Sigma

        - SelectedBlocks : The calculation is done with the SAME t(k) for all blocks.
          If this list is non-empty, only the blocks in this list are calculated;
          if it is empty (the default), all blocks are calculated.
          e.g. if G and Sigma have block indices 'up' and 'down' :
               SelectedBlocks == ()     : both 'up' and 'down' are calculated,
               SelectedBlocks == ['up'] : only 'up' is calculated, 'down' is left at 0.


        """
        S = Sigma.View_SelectedBlocks(SelectedBlocks) if SelectedBlocks else Sigma
        Gres = Res if Res else Sigma.copy() 
        G = Gres.View_SelectedBlocks(SelectedBlocks) if SelectedBlocks else Gres

        # check input
        assert self.Orthogonal_Basis, "Local_G : must be orthogonal. non ortho cases not checked."
        assert isinstance(G,GF), "G must be a GF"
        assert len(list(set([g.N1 for i,g in G]))) == 1
        assert self.BZ_weights.shape[0] == self.N_kpts(), "Internal Error"
        no = list(set([g.N1 for i,g in G]))[0]
        Sigma_Nargs = len(inspect.getargspec(Sigma)[0]) if callable (Sigma) else 0
        assert Sigma_Nargs <=2 , "Sigma function is not of the correct type. See Documentation"

        # Initialize
        G.zero()
        tmp,tmp2 = G.copy(),G.copy()
        mupat = mu * numpy.identity(no, numpy.complex_)
        tmp <<= iOmega_n
        if Field != None : tmp -= Field 
        if Sigma_Nargs==0: tmp -= Sigma  # subtract Sigma once and for all

        # Loop on k points...
        for w, k, eps_k in izip(*[MPI.slice_array(A) for A in [self.BZ_weights, self.BZ_Points, self.Hopping]]):

            eps_hat = Epsilon_Hat(eps_k) if Epsilon_Hat else eps_k
            tmp2 <<= tmp
            tmp2 -= tmp2.NBlocks * [eps_hat - mupat]

            if Sigma_Nargs == 1: tmp2 -= Sigma (k)
            elif Sigma_Nargs ==2: tmp2 -= Sigma (k,eps_k)

            tmp2.invert()
            tmp2 *= w
            G += tmp2

        G <<= MPI.all_reduce(MPI.world,G,lambda x,y : x+y)
        MPI.barrier()

        return Gres
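For a single Matsubara frequency and a k-independent Sigma, the quantity accumulated by the k-loop above reduces to a weighted Brillouin-zone sum of inverse matrices. A minimal numpy sketch (illustrative names; the real method works on whole GF blocks and distributes the k-points over MPI):

import numpy

def sum_over_k(iw, mu, eps_k_list, weights, Sigma):
    """G(iw) = sum_k w_k * inverse( (iw + mu) * 1 - eps(k) - Sigma )."""
    n = Sigma.shape[0]
    G = numpy.zeros((n, n), dtype=complex)
    for w, eps_k in zip(weights, eps_k_list):
        G += w * numpy.linalg.inv((iw + mu) * numpy.eye(n) - eps_k - Sigma)
    return G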
Example #40
0
    def __init__(self,
                 Beta,
                 Norb,
                 U_interact=None,
                 J_Hund=None,
                 GFStruct=False,
                 map=False,
                 use_spinflip=False,
                 useMatrix=True,
                 l=2,
                 T=None,
                 dimreps=None,
                 irep=None,
                 deg_orbs=[],
                 Sl_Int=None):

        self.offset = 0
        self.use_spinflip = use_spinflip
        self.Norb = Norb

        if (useMatrix):

            if not (Sl_Int is None):
                Umat = Umatrix(l=l)
                assert len(Sl_Int) == (l + 1), "Sl_Int has the wrong length"
                if (type(Sl_Int) == ListType):
                    Rcl = numpy.array(Sl_Int)
                else:
                    Rcl = Sl_Int
                Umat(T=T, Rcl=Rcl)
            else:
                if ((U_interact == None) and (J_Hund == None)):
                    MPI.report("Give U,J or Slater integrals!!!")
                    assert 0
                Umat = Umatrix(U_interact=U_interact, J_Hund=J_Hund, l=l)
                Umat(T=T)

            Umat.ReduceMatrix()
            if (Umat.N == Umat.Nmat):
                # Transformation T is of size 2l+1
                self.U = Umat.U
                self.Up = Umat.Up
            else:
                # Transformation is of size 2(2l+1)
                self.U = Umat.U
            # now we have the reduced matrices U and Up, we need it for tail fitting anyways

            if (use_spinflip):
                #Take the 4index Umatrix
                # check for imaginary matrix elements:
                if (abs(Umat.Ufull.imag) > 0.0001).any():
                    MPI.report(
                        "WARNING: complex interaction matrix!! Ignoring imaginary part for the moment!"
                    )
                    MPI.report(
                        "If you want to change this, look into Wien2k/Solver_MultiBand.py"
                    )
                self.U4ind = Umat.Ufull.real

            # this will be changed for arbitrary irep:
            # use only one subgroup of orbitals?
            if not (irep is None):
                #print irep, dimreps
                assert not (dimreps is None
                            ), "Dimensions of the representatives are missing!"
                assert Norb == dimreps[
                    irep - 1], "Dimensions of dimrep and Norb do not fit!"
                for ii in range(irep - 1):
                    self.offset += dimreps[ii]

        else:
            if ((U_interact == None) and (J_Hund == None)):
                MPI.report("For Kanamori representation, give U and J!!")
                assert 0
            self.U = numpy.zeros([Norb, Norb], numpy.float_)
            self.Up = numpy.zeros([Norb, Norb], numpy.float_)
            for i in range(Norb):
                for j in range(Norb):
                    if (i == j):
                        self.Up[i, i] = U_interact + 2.0 * J_Hund
                    else:
                        self.Up[i, j] = U_interact
                        self.U[i, j] = U_interact - J_Hund

        if (GFStruct):
            assert map, "give also the mapping!"
            self.map = map
        else:
            # standard GFStruct and map
            GFStruct = [('%s' % (ud), [n for n in range(Norb)])
                        for ud in ['up', 'down']]
            self.map = {
                'up': ['up' for v in range(self.Norb)],
                'down': ['down' for v in range(self.Norb)]
            }

        #print GFStruct,self.map

        if (use_spinflip == False):
            Hamiltonian = self.__setHamiltonian_density()
        else:
            if (useMatrix):
                Hamiltonian = self.__setfullHamiltonian_Slater()
            else:
                Hamiltonian = self.__setfullHamiltonian_Kanamori(J_Hund=J_Hund)

        Quantum_Numbers = self.__setQuantumNumbers(GFStruct)

        # Determine if there are only blocs of size 1:
        self.blocssizeone = True
        for ib in GFStruct:
            if (len(ib[1]) > 1): self.blocssizeone = False

        # now initialize the solver with the stuff given above:
        Solver.__init__(self,
                        Beta=Beta,
                        GFstruct=GFStruct,
                        H_Local=Hamiltonian,
                        Quantum_Numbers=Quantum_Numbers)

        #self.SetGlobalMoves(deg_orbs)

        self.N_Cycles = 10000
        self.Nmax_Matrix = 100
        self.N_Time_Slices_Delta = 10000
        #if ((len(GFStruct)==2*Norb) and (use_spinflip==False)):
        if ((self.blocssizeone) and (use_spinflip == False)):
            self.Use_Segment_Picture = True
        else:
            self.Use_Segment_Picture = False
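A hypothetical construction call for this solver; the class name, import path and parameter values below are assumptions (only the keyword names come from the signature above), guessed from the Wien2k/Solver_MultiBand.py file mentioned in the warning message:

# Assumed import path and class name -- adjust to the actual package layout.
from pytriqs.Wien2k.Solver_MultiBand import SolverMultiBand

S = SolverMultiBand(Beta=40.0, Norb=5, U_interact=4.0, J_Hund=0.9,
                    useMatrix=True, l=2, use_spinflip=False)
S.N_Cycles = 200000     # override the default of 10000 set in __init__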
Example #41
0
    def __init__(self, HDFfile, mu = 0.0, hfield = 0.0, UseLDABlocs = False, LDAdata = 'SumK_LDA', Symmcorrdata = 'SymmCorr',
                 ParProjdata = 'SumK_LDA_ParProj', Symmpardata = 'SymmPar', Bandsdata = 'SumK_LDA_Bands'):
        """
        Initialises the class from data previously stored in an HDF5 archive.
        """

        if  not (type(HDFfile)==StringType):
            MPI.report("Give a string for the HDF5 filename to read the input!")
        else:
            self.HDFfile = HDFfile
            self.LDAdata = LDAdata
            self.ParProjdata = ParProjdata
            self.Bandsdata = Bandsdata
            self.Symmpardata = Symmpardata
            self.Symmcorrdata = Symmcorrdata
            self.blocnames= [ ['up','down'], ['ud'] ]
            self.NspinblocsGF = [2,1]
            self.Gupf = None
            self.hfield = hfield
            
            # read input from HDF:
            thingstoread = ['EnergyUnit','Nk','k_dep_projection','SP','SO','charge_below','Density_Required',
                            'symm_op','N_shells','shells','N_corr_shells','corr_shells','use_rotations','rotmat','rotmat_timeinv','Nreps',
                            'dim_reps','T','N_Orbitals','Proj_Mat','BZ_weights','Hopping']
            optionalthings = ['GFStruct_Solver','mapinv','map','Chemical_Potential','dc_imp','DCenerg','deg_shells']

            #ar=HDF_Archive(self.HDFfile,'a')
            #del ar

            self.retval = self.read_input_from_HDF(SubGrp=self.LDAdata,thingstoread=thingstoread,optionalthings=optionalthings)

            #ar=HDF_Archive(self.HDFfile,'a')
            #del ar

            if (self.SO) and (abs(self.hfield)>0.000001):
                self.hfield=0.0
                MPI.report("For SO, the external magnetic field is not implemented, setting it to 0!!")

           
            self.inequiv_shells(self.corr_shells)     # determine the number of inequivalent correlated shells

            # field to convert blocnames to indices
            self.names_to_ind = [{}, {}]
            for ibl in range(2):
                for inm in range(self.NspinblocsGF[ibl]): 
                    self.names_to_ind[ibl][self.blocnames[ibl][inm]] = inm * self.SP #(self.Nspinblocs-1)

            # GF structure used for the local things in the k sums
            self.GFStruct_corr = [ [ (al, range( self.corr_shells[i][3])) for al in self.blocnames[self.corr_shells[i][4]] ]  
                                   for i in xrange(self.N_corr_shells) ]

            if not (self.retval['GFStruct_Solver']):
                # No GFStruct was stored in HDF, so first set a standard one:
                self.GFStruct_Solver = [ [ (al, range( self.corr_shells[self.invshellmap[i]][3]) )
                                           for al in self.blocnames[self.corr_shells[self.invshellmap[i]][4]] ]
                                         for i in xrange(self.N_inequiv_corr_shells) ]
                self.map = [ {} for i in xrange(self.N_inequiv_corr_shells) ]
                self.mapinv = [ {} for i in xrange(self.N_inequiv_corr_shells) ]
                for i in xrange(self.N_inequiv_corr_shells):
                    for al in self.blocnames[self.corr_shells[self.invshellmap[i]][4]]:
                        self.map[i][al] = [al for j in range( self.corr_shells[self.invshellmap[i]][3] ) ]
                        self.mapinv[i][al] = al

            if not (self.retval['dc_imp']):
                # init the double counting:
                self.__initDC()

            if not (self.retval['Chemical_Potential']):
                self.Chemical_Potential = mu

            if not (self.retval['deg_shells']):
                self.deg_shells = [ [] for i in range(self.N_inequiv_corr_shells)]

            if self.symm_op:
                #MPI.report("Do the init for symm:")
                self.Symm_corr = Symmetry(HDFfile,subgroup=self.Symmcorrdata)

            # determine the smallest blocs, if wanted:
            if (UseLDABlocs): dm=self.analyse_BS()

          
            # now save things again to HDF5:
            if (MPI.IS_MASTER_NODE()):
                ar=HDF_Archive(self.HDFfile,'a')
                ar[self.LDAdata]['hfield'] = self.hfield
                del ar
            self.save()
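A hypothetical way to instantiate this class from an existing archive; the class name SumK_LDA and the import path are assumptions inferred from the defaults above (LDAdata = 'SumK_LDA') and from the error messages elsewhere in this file, and 'mycase.h5' is a placeholder:

# Assumed import path and class name -- adjust to the actual package layout.
from pytriqs.Wien2k.SumK_LDA import SumK_LDA

SK = SumK_LDA(HDFfile='mycase.h5', UseLDABlocs=True)
print(SK.N_corr_shells, SK.Chemical_Potential)   # attributes filled in __init__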
Example #42
0
    def __init__(
        self,
        HDFfile,
        mu=0.0,
        hfield=0.0,
        UseLDABlocs=False,
        LDAdata="SumK_LDA",
        Symmcorrdata="SymmCorr",
        ParProjdata="SumK_LDA_ParProj",
        Symmpardata="SymmPar",
        Bandsdata="SumK_LDA_Bands",
    ):
        """
        Initialises the class from data previously stored in an HDF5 archive.
        """

        if not (type(HDFfile) == StringType):
            MPI.report("Give a string for the HDF5 filename to read the input!")
        else:
            self.HDFfile = HDFfile
            self.LDAdata = LDAdata
            self.ParProjdata = ParProjdata
            self.Bandsdata = Bandsdata
            self.Symmpardata = Symmpardata
            self.Symmcorrdata = Symmcorrdata
            self.blocnames = [["up", "down"], ["ud"]]
            self.NspinblocsGF = [2, 1]
            self.Gupf = None
            self.hfield = hfield

            # read input from HDF:
            thingstoread = [
                "EnergyUnit",
                "Nk",
                "k_dep_projection",
                "SP",
                "SO",
                "charge_below",
                "Density_Required",
                "symm_op",
                "N_shells",
                "shells",
                "N_corr_shells",
                "corr_shells",
                "use_rotations",
                "rotmat",
                "rotmat_timeinv",
                "Nreps",
                "dim_reps",
                "T",
                "N_Orbitals",
                "Proj_Mat",
                "BZ_weights",
                "Hopping",
            ]
            optionalthings = [
                "GFStruct_Solver",
                "mapinv",
                "map",
                "Chemical_Potential",
                "dc_imp",
                "DCenerg",
                "deg_shells",
            ]

            # ar=HDF_Archive(self.HDFfile,'a')
            # del ar

            self.retval = self.read_input_from_HDF(
                SubGrp=self.LDAdata, thingstoread=thingstoread, optionalthings=optionalthings
            )

            # ar=HDF_Archive(self.HDFfile,'a')
            # del ar

            if (self.SO) and (abs(self.hfield) > 0.000001):
                self.hfield = 0.0
                MPI.report("For SO, the external magnetic field is not implemented, setting it to 0!!")

            self.inequiv_shells(self.corr_shells)  # determine the number of inequivalent correlated shells

            # field to convert blocnames to indices
            self.names_to_ind = [{}, {}]
            for ibl in range(2):
                for inm in range(self.NspinblocsGF[ibl]):
                    self.names_to_ind[ibl][self.blocnames[ibl][inm]] = inm * self.SP  # (self.Nspinblocs-1)

            # GF structure used for the local things in the k sums
            self.GFStruct_corr = [
                [(al, range(self.corr_shells[i][3])) for al in self.blocnames[self.corr_shells[i][4]]]
                for i in xrange(self.N_corr_shells)
            ]

            if not (self.retval["GFStruct_Solver"]):
                # No GFStruct was stored in HDF, so first set a standard one:
                self.GFStruct_Solver = [
                    [
                        (al, range(self.corr_shells[self.invshellmap[i]][3]))
                        for al in self.blocnames[self.corr_shells[self.invshellmap[i]][4]]
                    ]
                    for i in xrange(self.N_inequiv_corr_shells)
                ]
                self.map = [{} for i in xrange(self.N_inequiv_corr_shells)]
                self.mapinv = [{} for i in xrange(self.N_inequiv_corr_shells)]
                for i in xrange(self.N_inequiv_corr_shells):
                    for al in self.blocnames[self.corr_shells[self.invshellmap[i]][4]]:
                        self.map[i][al] = [al for j in range(self.corr_shells[self.invshellmap[i]][3])]
                        self.mapinv[i][al] = al

            if not (self.retval["dc_imp"]):
                # init the double counting:
                self.__initDC()

            if not (self.retval["Chemical_Potential"]):
                self.Chemical_Potential = mu

            if not (self.retval["deg_shells"]):
                self.deg_shells = [[] for i in range(self.N_inequiv_corr_shells)]

            if self.symm_op:
                # MPI.report("Do the init for symm:")
                self.Symm_corr = Symmetry(HDFfile, subgroup=self.Symmcorrdata)

            # determine the smallest blocs, if wanted:
            if UseLDABlocs:
                dm = self.analyse_BS()

            # now save things again to HDF5:
            if MPI.IS_MASTER_NODE():
                ar = HDF_Archive(self.HDFfile, "a")
                ar[self.LDAdata]["hfield"] = self.hfield
                del ar
            self.save()
Example #43
0
    def Solve(self):
        """ Solve the impurity problem """

        # Find if an operator is in oplist
        def mysearch(op):
            l = [ k for (k,v) in OPdict.items() if (v-op).is_zero()]
            assert len(l) <=1
            return l[0] if l else None

        # Same as mysearch, but raises an error if the operator is not found
        def myfind(op):
            r = mysearch(op)
            if r==None : raise ValueError, "Operator %s can not be found by myfind !"%op
            return r

        # For backward compatibility
        self.update_params(self.__dict__)

        # Check all the parameters before solving
        MPI.report(Parameters.check(self.__dict__,self.Required,self.Optional))

        # We have to add the Hamiltonian the epsilon part of G0
        if type(self.H_Local) != type(Operator()) : raise TypeError, "H_Local is not an operator"
        H = self.H_Local
        for a,alpha_list in  self.GFStruct :
            for mu in alpha_list : 
                for nu in alpha_list : 
                    H += real(self.G0[a]._tail[2][mu,nu]) * Cdag(a,mu)*C(a,nu)

        OPdict = {"Hamiltonian": H}
        MPI.report("Hamiltonian with Eps0 term  : ",H)
        
        # First separate the quantum Numbers that are operators and those which are symmetries.
        QuantumNumberOperators  = dict( (n,op) for (n,op) in self.Quantum_Numbers.items() if type(op) == type(Operator()))
        QuantumNumberSymmetries = dict( (n,op) for (n,op) in self.Quantum_Numbers.items() if type(op) != type(Operator()))

        # Check that the quantum numbers commutes with the Hamiltonian
        for name,op in QuantumNumberOperators.items():
            assert Commutator(self.H_Local ,op).is_zero(), "One quantum number is not commuting with Hamiltonian"
            OPdict[name]=op

        # Complete the OPdict with the fundamental operators
        OPdict, nf, nb, SymChar, NameOpFundamentalList = Operators.Complete_OperatorsList_with_Fundamentals(OPdict)

        # Add the operators to be averaged in OPdict and prepare the list for the C-code
        self.Measured_Operators_Results = {}
        self.twice_defined_Ops = {}
        self.Operators_To_Average_List = []
        for name, op in self.Measured_Operators.items():
          opn = mysearch(op)
          if opn == None : 
              OPdict[name] = op
              self.Measured_Operators_Results[name] = 0.0
              self.Operators_To_Average_List.append(name)
          else:
              MPI.report("Operator %s already defined as %s, using this instead for measuring"%(name,opn))
              self.twice_defined_Ops[name] = opn
              self.Measured_Operators_Results[opn] = 0.0
              if opn not in self.Operators_To_Average_List: self.Operators_To_Average_List.append(opn)

        # Time correlation functions are added
        self.OpCorr_To_Average_List = []
        for name, op in self.Measured_Time_Correlators.items():
          opn = mysearch(op[0])
          if opn == None : 
              OPdict[name] = op[0]
              self.OpCorr_To_Average_List.append(name)
          else:
              MPI.report("Operator %s already defined as %s, using this instead for measuring"%(name,opn))
              if opn not in self.OpCorr_To_Average_List: self.OpCorr_To_Average_List.append(opn)
        # Create storage for data:
        Nops = len(self.OpCorr_To_Average_List)
        f = lambda L : GFBloc_ImTime(Indices= [0], Beta = self.Beta, NTimeSlices=L )
        if (Nops>0):
            self.Measured_Time_Correlators_Results = GF(Name_Block_Generator = [ ( n,f(self.Measured_Time_Correlators[n][1]) ) for n in self.Measured_Time_Correlators], Copy=False)
        else:
            self.Measured_Time_Correlators_Results = GF(Name_Block_Generator = [ ( 'OpCorr',f(2) ) ], Copy=False)

        # Take care of the global moves

        # First, given a function (a,alpha,dagger) -> (a', alpha', dagger')
        # I construct a function on fundamental operators
        def Map_GM_to_Fund_Ops( GM ) :
            def f(fop) :
                a,alpha, dagger = fop.name + (fop.dag,)
                ap,alphap,daggerp = GM((a,alpha,dagger))
                return Cdag(ap,alphap) if daggerp else C(ap,alphap)
            return f

        # Complete the OpList so that it is closed under the global moves
        while 1:
            added_something = False
            for n,(proba,GM) in enumerate(self.Global_Moves):
                # F is a function that map all operators according to the global move
                F = Extend_Function_on_Fundamentals(Map_GM_to_Fund_Ops(GM))
                # Make sure that OPdict is complete, i.e. all images of OPdict operators are in OPdict
                for name,op in OPdict.items() :
                    op_im = F(op)
                    if mysearch(op_im)==None :
                        # find the key and put in in the dictionnary
                        i=0
                        while 1:
                            new_name = name + 'GM' +  i*'_' + "%s"%n
                            if new_name not in OPdict : break
                            i+=1
                        added_something = True
                        OPdict[new_name] = op_im
            # break the while loop
            if not added_something: break

        # Now I have all operators, I make the transcription of the global moves
        self.Global_Moves_Mapping_List = []
        for n,(proba,GM) in enumerate(self.Global_Moves):
            F = Extend_Function_on_Fundamentals(Map_GM_to_Fund_Ops(GM))
            m = {}
            for name,op in OPdict.items() :
                op_im = F(op)
                n1,n2 = myfind(op),myfind(op_im)
                m[n1] = n2
            name = "%s"%n
            self.Global_Moves_Mapping_List.append((proba,m,name))
        #MPI.report ("Global_Moves_Mapping_List", self.Global_Moves_Mapping_List)

        # Now add the operator for F calculation if needed
        if self.Use_F :
            Hloc_WithoutQuadratic = self.H_Local.RemoveQuadraticTerms()
            for n,op in OPdict.items() :
                if op.is_Fundamental():
                    op2 = Commutator(Hloc_WithoutQuadratic,op)
                    if not mysearch(op2) : OPdict["%s_Comm_Hloc"%n] = op2

        # All operators have real coefficients. Check this and remove the 0j term
        # since the C++ expects operators with real numbers 
        for n,op in OPdict.items(): op.make_coef_real_and_check()

        # Transcription of operators for C++
        Oplist2 = Operators.Transcribe_OpList_for_C(OPdict)
        SymList = [sym for (n,sym) in SymChar.items() if n in QuantumNumberSymmetries]
        self.H_diag = C_Module.Hloc(nf,nb,Oplist2,QuantumNumberOperators,SymList,self.Quantum_Numbers_Selection,0) 

        # Create the C_Cdag_Ops array which describes the grouping of (C, Cdagger) operators
        # for the Monte Carlo moves : (a, alpha) block structure [ [ (C_name, Cdag_name) ] ]
        self.C_Cdag_Ops = [ [ (myfind(C(a,alpha)), myfind(Cdag(a,alpha))) for alpha in al ] for a,al in self.GFStruct]

        # Define G0_inv and correct G0 so that it has the exact 1/omega high-frequency behavior
        self.G0_inv = inverse(self.G0)
        Delta = self.G0_inv.Delta()
        for n,g in self.G0_inv:
          assert(g.N1==g.N2)
          identity=numpy.identity(g.N1)
          self.G0[n] <<= GF_Initializers.A_Omega_Plus_B(identity, g._tail[0])
          self.G0[n] -= Delta[n]
          #self.G0[n] <<= iOmega_n + g._tail[0] - Delta[n]
        self.G0_inv <<= self.G0
        self.G0.invert()

        # Construct the function in tau
        f = lambda g,L : GFBloc_ImTime(Indices= g.Indices, Beta = g.Beta, NTimeSlices=L )
        self.Delta_tau = GF(Name_Block_Generator = [ (n,f(g,self.N_Time_Slices_Delta) )   for n,g in self.G], Copy=False, Name='D')
        self.G_tau = GF(Name_Block_Generator = [ (n,f(g,self.N_Time_Slices_Gtau) )    for n,g in self.G], Copy=False, Name='G')
        self.F_tau = GF(Name_Block_Generator = self.G_tau, Copy=True, Name='F')
        
        for (i,gt) in self.Delta_tau : gt.setFromInverseFourierOf(Delta[i])
        MPI.report("Inv Fourier done")
        if (self.Legendre_Accumulation):
            self.G_Legendre = GF(Name_Block_Generator = [ (n,GFBloc_ImLegendre(Indices=g.Indices, Beta=g.Beta, NLegendreCoeffs=self.N_Legendre_Coeffs) )   for n,g in self.G], Copy=False, Name='Gl')
        else:
            self.G_Legendre = GF(Name_Block_Generator = [ (n,GFBloc_ImLegendre(Indices=[1], Beta=g.Beta, NLegendreCoeffs=1) ) for n,g in self.G], Copy=False, Name='Gl') # G_Legendre must not be empty but is not needed in this case. So I make it as small as possible.
        
        # Starting the C++ code
        self.Sigma_Old <<= self.Sigma
        C_Module.MC_solve(self.__dict__ ) # C++ solver
        
        # Compute G on Matsubara axis possibly fitting the tail
        if self.Legendre_Accumulation:
          for s,g in self.G:
            identity=numpy.zeros([g.N1,g.N2],numpy.float)
            for i,m in enumerate (g._IndicesL):
              for j,n in enumerate (g._IndicesR):
                if m==n: identity[i,j]=1
            self.G_Legendre[s].enforce_discontinuity(identity) # set the known tail
            g <<= LegendreToMatsubara(self.G_Legendre[s])
        else:
          if (self.Time_Accumulation):
            for name, g in self.G_tau:
              identity=numpy.zeros([g.N1,g.N2],numpy.float)
              for i,m in enumerate (g._IndicesL):
                for j,n in enumerate (g._IndicesR):
                  if m==n: identity[i,j]=1
              g._tail.zero()
              g._tail[1] = identity
              self.G[name].setFromFourierOf(g)

          # This is very sick... but what can we do???
          self.Sigma <<= self.G0_inv - inverse(self.G)
          self.fitTails()
          self.G <<= inverse(self.G0_inv - self.Sigma)

        # Now find the self-energy
        self.Sigma <<= self.G0_inv - inverse(self.G)

        MPI.report("Solver %(Name)s has ended."%self.__dict__)

        # for operator averages: if twice defined operator, rename output:
        for op1,op2 in self.twice_defined_Ops.items():
            self.Measured_Operators_Results[op1] = self.Measured_Operators_Results[op2]
        for op1,op2 in self.twice_defined_Ops.items():
            if op2 in self.Measured_Operators_Results.keys(): del self.Measured_Operators_Results[op2]

        if self.Use_F :
            for (n,f) in self.F: f.setFromFourierOf(self.F_tau[n])
            self.G2 = self.G0 + self.G0 * self.F
            self.Sigma2 = self.F * inverse(self.G2)
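Each entry of self.Global_Moves is a pair (probability, mapping), where the mapping acts on (block, index, dagger) triples of the fundamental operators, as described before Map_GM_to_Fund_Ops. An illustrative sketch of such a mapping (not taken from the source) that exchanges the 'up' and 'down' blocks:

# Illustrative global move: a spin flip exchanging the 'up' and 'down' blocks.
def spin_flip(op):
    a, alpha, dagger = op
    return ({'up': 'down', 'down': 'up'}[a], alpha, dagger)

# Global_Moves would then contain, e.g., [(0.05, spin_flip)]  (the probability is illustrative)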
Example #44
0
    def calc_DensityCorrection(self, Filename="densmat.dat"):
        """ Calculates the density correction in order to feed it back to the DFT calculations."""

        assert type(Filename) == StringType, "Filename has to be a string!"

        ntoi = self.names_to_ind[self.SO]
        bln = self.blocnames[self.SO]

        # Set up deltaN:
        deltaN = {}
        for ib in bln:
            deltaN[ib] = [
                numpy.zeros([self.N_Orbitals[ik][ntoi[ib]], self.N_Orbitals[ik][ntoi[ib]]], numpy.complex_)
                for ik in range(self.Nk)
            ]

        ikarray = numpy.array(range(self.Nk))

        dens = {}
        for ib in bln:
            dens[ib] = 0.0

        for ik in MPI.slice_array(ikarray):

            S = self.latticeGF_Matsubara(ik=ik, mu=self.Chemical_Potential)
            for sig, g in S:
                deltaN[sig][ik] = S[sig].density()
                dens[sig] += self.BZ_weights[ik] * S[sig].total_density()

        # put MPI Barrier:
        for sig in deltaN:
            for ik in range(self.Nk):
                deltaN[sig][ik] = MPI.all_reduce(MPI.world, deltaN[sig][ik], lambda x, y: x + y)
            dens[sig] = MPI.all_reduce(MPI.world, dens[sig], lambda x, y: x + y)
        MPI.barrier()

        # now save to file:
        if MPI.IS_MASTER_NODE():
            if self.SP == 0:
                f = open(Filename, "w")
            else:
                f = open(Filename + "up", "w")
                f1 = open(Filename + "dn", "w")
            # write chemical potential (in Rydberg):
            f.write("%.14f\n" % (self.Chemical_Potential / self.EnergyUnit))
            if self.SP != 0:
                f1.write("%.14f\n" % (self.Chemical_Potential / self.EnergyUnit))
            # write beta in Rydberg^-1
            f.write("%.14f\n" % (S.Beta * self.EnergyUnit))
            if self.SP != 0:
                f1.write("%.14f\n" % (S.Beta * self.EnergyUnit))
            if self.SP == 0:
                for ik in range(self.Nk):
                    f.write("%s\n" % self.N_Orbitals[ik][0])
                    for inu in range(self.N_Orbitals[ik][0]):
                        for imu in range(self.N_Orbitals[ik][0]):
                            valre = (deltaN["up"][ik][inu, imu].real + deltaN["down"][ik][inu, imu].real) / 2.0
                            valim = (deltaN["up"][ik][inu, imu].imag + deltaN["down"][ik][inu, imu].imag) / 2.0
                            f.write("%.14f  %.14f " % (valre, valim))
                        f.write("\n")
                    f.write("\n")
                f.close()
            elif (self.SP == 1) and (self.SO == 0):
                for ik in range(self.Nk):
                    f.write("%s\n" % self.N_Orbitals[ik][0])
                    for inu in range(self.N_Orbitals[ik][0]):
                        for imu in range(self.N_Orbitals[ik][0]):
                            f.write(
                                "%.14f  %.14f " % (deltaN["up"][ik][inu, imu].real, deltaN["up"][ik][inu, imu].imag)
                            )
                        f.write("\n")
                    f.write("\n")
                f.close()
                for ik in range(self.Nk):
                    f1.write("%s\n" % self.N_Orbitals[ik][1])
                    for inu in range(self.N_Orbitals[ik][1]):
                        for imu in range(self.N_Orbitals[ik][1]):
                            f1.write(
                                "%.14f  %.14f " % (deltaN["down"][ik][inu, imu].real, deltaN["down"][ik][inu, imu].imag)
                            )
                        f1.write("\n")
                    f1.write("\n")
                f1.close()
            else:
                for ik in range(self.Nk):
                    f.write("%s\n" % self.N_Orbitals[ik][0])
                    for inu in range(self.N_Orbitals[ik][0]):
                        for imu in range(self.N_Orbitals[ik][0]):
                            f.write(
                                "%.14f  %.14f " % (deltaN["ud"][ik][inu, imu].real, deltaN["ud"][ik][inu, imu].imag)
                            )
                        f.write("\n")
                    f.write("\n")
                f.close()
                for ik in range(self.Nk):
                    f1.write("%s\n" % self.N_Orbitals[ik][0])
                    for inu in range(self.N_Orbitals[ik][0]):
                        for imu in range(self.N_Orbitals[ik][0]):
                            f1.write(
                                "%.14f  %.14f " % (deltaN["ud"][ik][inu, imu].real, deltaN["ud"][ik][inu, imu].imag)
                            )
                        f1.write("\n")
                    f1.write("\n")
                f1.close()

        return deltaN, dens
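The file layout written above (SP == 0 case) is: the chemical potential in Ry, then beta in Ry^-1, then for each k-point the number of orbitals followed by rows of "Re Im" pairs. A sketch of a matching reader, inferred from the f.write calls (illustrative only, not part of the class):

import numpy

def read_densmat(fname):
    """Read back the spin-averaged deltaN blocks written for SP == 0."""
    with open(fname) as f:
        mu_ry = float(f.readline())       # chemical potential, in Ry
        beta_ry = float(f.readline())     # inverse temperature, in Ry^-1
        blocks = []
        while True:
            line = f.readline()
            if not line:
                break                     # end of file
            if not line.strip():
                continue                  # blank line separating k-points
            no = int(line)
            mat = numpy.zeros((no, no), complex)
            for inu in range(no):
                vals = [float(x) for x in f.readline().split()]
                for imu in range(no):
                    mat[inu, imu] = vals[2 * imu] + 1j * vals[2 * imu + 1]
            blocks.append(mat)
    return mu_ry, beta_ry, blocks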
Example #45
0
    def __init__(self, Beta, Norb, U_interact=None, J_Hund=None, GFStruct=False, map=False, use_spinflip=False,
                 useMatrix = True, l=2, T=None, dimreps=None, irep=None, deg_orbs = [], Sl_Int = None):
    
        self.offset = 0
        self.use_spinflip = use_spinflip
        self.Norb = Norb
        
        if (useMatrix):
                                      
            if not (Sl_Int is None):
                Umat = Umatrix(l=l)
                assert len(Sl_Int)==(l+1),"Sl_Int has the wrong length"
                if (type(Sl_Int)==ListType):
                    Rcl = numpy.array(Sl_Int)
                else:
                    Rcl = Sl_Int
                Umat(T=T,Rcl=Rcl)
            else:
                if ((U_interact==None)and(J_Hund==None)):
                    MPI.report("Give U,J or Slater integrals!!!")
                    assert 0
                Umat = Umatrix(U_interact=U_interact, J_Hund=J_Hund, l=l)
                Umat(T=T)
            
            Umat.ReduceMatrix()
            if (Umat.N==Umat.Nmat):
                # Transformation T is of size 2l+1
                self.U = Umat.U
                self.Up = Umat.Up
            else:
                # Transformation is of size 2(2l+1)
                self.U = Umat.U
             # now we have the reduced matrices U and Up, we need it for tail fitting anyways

            if (use_spinflip):
                #Take the 4index Umatrix
                # check for imaginary matrix elements:
                if (abs(Umat.Ufull.imag)>0.0001).any():
                    MPI.report("WARNING: complex interaction matrix!! Ignoring imaginary part for the moment!")
                    MPI.report("If you want to change this, look into Wien2k/Solver_MultiBand.py")
                self.U4ind = Umat.Ufull.real
    
            # this will be changed for arbitrary irep:
            # use only one subgroup of orbitals?
            if not (irep is None):
                #print irep, dimreps
                assert not (dimreps is None), "Dimensions of the representatives are missing!"
                assert Norb==dimreps[irep-1],"Dimensions of dimrep and Norb do not fit!"
                for ii in range(irep-1):
                    self.offset += dimreps[ii]

               

        else:
            if ((U_interact==None)and(J_Hund==None)):
                MPI.report("For Kanamori representation, give U and J!!")
                assert 0
            self.U  = numpy.zeros([Norb,Norb],numpy.float_)
            self.Up = numpy.zeros([Norb,Norb],numpy.float_)
            for i in range(Norb):
                for j in range(Norb):
                    if (i==j):
                        self.Up[i,i] = U_interact + 2.0*J_Hund
                    else:
                        self.Up[i,j] = U_interact
                        self.U[i,j]  = U_interact - J_Hund


        if (GFStruct):
            assert map, "give also the mapping!"
            self.map = map
        else:
            # standard GFStruct and map
            GFStruct = [ ('%s'%(ud),[n for n in range(Norb)]) for ud in ['up','down'] ]
            self.map = {'up' : ['up' for v in range(self.Norb)], 'down' : ['down' for v in range(self.Norb)]}

        #print GFStruct,self.map
        
        if (use_spinflip==False):
            Hamiltonian = self.__setHamiltonian_density()
        else:
            if (useMatrix):
                Hamiltonian = self.__setfullHamiltonian_Slater()
            else:
                Hamiltonian = self.__setfullHamiltonian_Kanamori(J_Hund = J_Hund)

        Quantum_Numbers = self.__setQuantumNumbers(GFStruct)
    
        # Determine if there are only blocs of size 1:
        self.blocssizeone = True
        for ib in GFStruct:
            if (len(ib[1])>1): self.blocssizeone = False

       
        # now initialize the solver with the stuff given above:
        Solver.__init__(self,
                        Beta = Beta,
                        GFstruct = GFStruct,
                        H_Local = Hamiltonian,
                        Quantum_Numbers = Quantum_Numbers )

        #self.SetGlobalMoves(deg_orbs)

        self.N_Cycles  = 10000
        self.Nmax_Matrix = 100
        self.N_Time_Slices_Delta= 10000
        #if ((len(GFStruct)==2*Norb) and (use_spinflip==False)): 
        if ((self.blocssizeone) and (use_spinflip==False)):
            self.Use_Segment_Picture = True
        else:
            self.Use_Segment_Picture = False
Example #46
0
    def run(self):
        """
        """
        MPI.barrier()
        if MPI.size == 1:  # single machine. Avoid the fork
            while not (self.Finished()):
                n = self.Next()
                if n != None:
                    self.Treate(self.The_Function(n), 0)
            return

        # Code for multiprocessor machines
        RequestList, pid = [], 0  # the pid of the child on the master
        node_running, node_stopped = MPI.size * [False], MPI.size * [False]

        if MPI.rank == 0:
            while not (self.Finished()) or pid or [n for n in node_running if n] != []:
                # Treat the requests which have finished
                def keep_request(r):
                    # if not(MPI.test(r)) :  return True
                    # if r.message !=None : self.Treate(*r.message)
                    # node_running[r.status.source] = False
                    T = r.test()
                    if T is None:
                        return True
                    value = T[0]
                    if value != None:
                        self.Treate(*value)
                    node_running[T[1].source] = False
                    return False

                RequestList = filter(keep_request, RequestList)
                # send new calculation to the nodes or "stop" them
                for node in [n for n in range(1, MPI.size) if not (node_running[n] or node_stopped[n])]:
                    # open('tmp','a').write("master : comm to node %d %s\n"%(node,self.Finished()))
                    MPI.send(self.Finished(), node)
                    if not (self.Finished()):
                        MPI.send(self.Next(), node)  # send the data for the computation
                        node_running[node] = True
                        RequestList.append(MPI.irecv(node))  # Post the receive
                    else:
                        node_stopped[node] = True

                # Check whether the child process on the master has finished.
                if not (pid) or os.waitpid(pid, os.WNOHANG):
                    if pid:
                        RR = cPickle.load(open("res_master", "r"))
                        if RR != None:
                            self.Treate(*RR)
                    if not (self.Finished()):
                        pid = os.fork()
                        currently_calculated_by_master = self.Next()
                        if pid == 0:  # we are on the child
                            if currently_calculated_by_master:
                                res = self.The_Function(currently_calculated_by_master)
                            else:
                                res = None
                            cPickle.dump((res, MPI.rank), open("res_master", "w"))
                            os._exit(0)  # Cf python doc. Used for child only.
                    else:
                        pid = 0
                if pid:
                    time.sleep(self.SleepTime)  # so that most of the time is for the actual calculation on the master

        else:  # not master
            while not (MPI.recv(0)):  # master will first send a Finished flag
                omega = MPI.recv(0)
                if omega == None:
                    res = None
                else:
                    res = self.The_Function(omega)
                MPI.send((res, MPI.rank), 0)

        MPI.barrier()
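The worker side of this protocol is simple: the master first sends a "finished" flag, then either a task or a stop. A sketch of the same worker loop written with mpi4py (an assumption -- the example above uses the library's own MPI wrapper, not mpi4py):

from mpi4py import MPI as mpi

def worker(the_function):
    """Receive tasks from rank 0 until the master reports it is finished."""
    comm = mpi.COMM_WORLD
    while not comm.recv(source=0):            # master first sends the Finished flag
        task = comm.recv(source=0)            # then the data for the computation
        res = the_function(task) if task is not None else None
        comm.send((res, comm.rank), dest=0)   # reply with (result, rank)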
Example #47
0
    def convert_bands_input(self, BandsSubGrp='SumK_LDA_Bands'):
        """
        Converts the input for momentum resolved spectral functions, and stores it in BandsSubGrp in the
        HDF5.
        """

        if not (MPI.IS_MASTER_NODE()): return

        self.BandsSubGrp = BandsSubGrp
        MPI.report("Reading bands input from %s..." % self.Band_file)

        R = Read_Fortran_File(self.Band_file)
        try:
            Nk = int(R.next())

            # read the list of N_Orbitals for all k points
            N_Orbitals = [[0 for isp in range(self.Nspinblocs)]
                          for ik in xrange(Nk)]
            for isp in range(self.Nspinblocs):
                for ik in xrange(Nk):
                    N_Orbitals[ik][isp] = int(R.next())

            # Initialise the projectors:
            Proj_Mat = [[[
                numpy.zeros([self.corr_shells[icrsh][3], N_Orbitals[ik][isp]],
                            numpy.complex_)
                for icrsh in range(self.N_corr_shells)
            ] for isp in range(self.Nspinblocs)] for ik in range(Nk)]

            # Read the projectors from the file:
            for ik in xrange(Nk):
                for icrsh in range(self.N_corr_shells):
                    no = self.corr_shells[icrsh][3]
                    # first Real part for BOTH spins, due to conventions in dmftproj:
                    for isp in range(self.Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i, j] = R.next()
                    # now Imag part:
                    for isp in range(self.Nspinblocs):
                        for i in xrange(no):
                            for j in xrange(N_Orbitals[ik][isp]):
                                Proj_Mat[ik][isp][icrsh][i, j] += 1j * R.next()

            Hopping = [[
                numpy.zeros([N_Orbitals[ik][isp], N_Orbitals[ik][isp]],
                            numpy.complex_) for isp in range(self.Nspinblocs)
            ] for ik in xrange(Nk)]

            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
            for isp in range(self.Nspinblocs):
                for ik in xrange(Nk):
                    no = N_Orbitals[ik][isp]
                    for i in xrange(no):
                        Hopping[ik][isp][i, i] = R.next() * self.EnergyUnit

            # now read the partial projectors:
            N_parproj = [int(R.next()) for i in range(self.N_shells)]
            # Initialise P, here a double list of matrices:
            Proj_Mat_pc = [[[[
                numpy.zeros([self.shells[ish][3], N_Orbitals[ik][isp]],
                            numpy.complex_) for ir in range(N_parproj[ish])
            ] for ish in range(self.N_shells)]
                            for isp in range(self.Nspinblocs)]
                           for ik in range(Nk)]

            for ish in range(self.N_shells):

                for ik in xrange(Nk):
                    for ir in range(N_parproj[ish]):
                        for isp in range(self.Nspinblocs):

                            for i in xrange(self.shells[ish][3]):  # read real part:
                                for j in xrange(N_Orbitals[ik][isp]):
                                    Proj_Mat_pc[ik][isp][ish][ir][i, j] = R.next()

                            for i in xrange(self.shells[ish][3]):  # read imaginary part:
                                for j in xrange(N_Orbitals[ik][isp]):
                                    Proj_Mat_pc[ik][isp][ish][ir][i, j] += 1j * R.next()

        except StopIteration:  # a more explicit error if the file is corrupted.
            raise "SumK_LDA : reading file HMLT_file failed!"

        R.close()
        # reading done!

        #-----------------------------------------
        # Store the input into HDF5:
        ar = HDF_Archive(self.HDFfile, 'a')
        if not (self.BandsSubGrp in ar): ar.create_group(self.BandsSubGrp)
        # The subgroup containing the data. If it does not exist, it is created.
        # If it exists, the data is overwritten!!!
        thingstowrite = [
            'Nk', 'N_Orbitals', 'Proj_Mat', 'Hopping', 'N_parproj',
            'Proj_Mat_pc'
        ]
        for it in thingstowrite:
            exec "ar['%s']['%s'] = %s" % (self.BandsSubGrp, it, it)

        #ar[self.BandsSubGrp]['Nk'] = Nk
        #ar[self.BandsSubGrp]['N_Orbitals'] = N_Orbitals
        #ar[self.BandsSubGrp]['Proj_Mat'] = Proj_Mat
        #self.Proj_Mat = Proj_Mat
        #self.N_Orbitals = N_Orbitals
        #self.Nk = Nk
        #self.Hopping = Hopping
        del ar
Example #48
0
    def check_inputDOS(self, ommin, ommax, N_om, Beta=10, broadening=0.01):

        delta_om = (ommax - ommin) / (N_om - 1)
        Mesh = numpy.zeros([N_om], numpy.float_)

        DOS = {}
        for bn in self.blocnames[self.SO]:
            DOS[bn] = numpy.zeros([N_om], numpy.float_)

        DOSproj = [{} for icrsh in range(self.N_inequiv_corr_shells)]
        DOSproj_orb = [{} for icrsh in range(self.N_inequiv_corr_shells)]
        for icrsh in range(self.N_inequiv_corr_shells):
            for bn in self.blocnames[self.corr_shells[self.invshellmap[icrsh]][4]]:
                dl = self.corr_shells[self.invshellmap[icrsh]][3]
                DOSproj[icrsh][bn] = numpy.zeros([N_om], numpy.float_)
                DOSproj_orb[icrsh][bn] = numpy.zeros([dl, dl, N_om], numpy.float_)

        for i in range(N_om):
            Mesh[i] = ommin + delta_om * i

        # init:
        Gloc = []
        for icrsh in range(self.N_corr_shells):
            b_list = [a for a, al in self.GFStruct_corr[icrsh]]
            glist = lambda: [
                GFBloc_ReFreq(Indices=al, Beta=Beta, MeshArray=Mesh)
                for a, al in self.GFStruct_corr[icrsh]
            ]
            Gloc.append(GF(NameList=b_list, BlockList=glist(), Copy=False))
        for icrsh in xrange(self.N_corr_shells):
            Gloc[icrsh].zero()  # initialize to zero

        for ik in xrange(self.Nk):

            Gupf = self.latticeGF_realfreq(ik=ik,
                                           mu=self.Chemical_Potential,
                                           broadening=broadening,
                                           Beta=Beta,
                                           Mesh=Mesh,
                                           withSigma=False)
            Gupf *= self.BZ_weights[ik]

            # non-projected DOS
            for iom in range(N_om):
                for sig, gf in Gupf:
                    asd = gf._data.array[:, :, iom].imag.trace() / (-3.1415926535)
                    DOS[sig][iom] += asd

            for icrsh in xrange(self.N_corr_shells):
                tmp = Gloc[icrsh].copy()
                for sig, gf in tmp:
                    tmp[sig] <<= self.downfold(ik, icrsh, sig, Gupf[sig],
                                               gf)  # downfolding G
                Gloc[icrsh] += tmp

        if (self.symm_op != 0): Gloc = self.Symm_corr.symmetrise(Gloc)

        if (self.use_rotations):
            for icrsh in xrange(self.N_corr_shells):
                for sig, gf in Gloc[icrsh]:
                    Gloc[icrsh][sig] <<= self.rotloc(icrsh,
                                                     gf,
                                                     direction='toLocal')

        # Gloc can now also be used to look at orbitally resolved quantities
        for ish in range(self.N_inequiv_corr_shells):
            for sig, gf in Gloc[self.invshellmap[ish]]:  # loop over spins
                for iom in range(N_om):
                    DOSproj[ish][sig][iom] -= gf._data.array[:, :, iom].imag.trace() / numpy.pi
                DOSproj_orb[ish][sig][:, :, :] -= gf._data.array[:, :, :].imag / numpy.pi

        # output:
        if (MPI.IS_MASTER_NODE()):
            for bn in self.blocnames[self.SO]:
                f = open('DOS%s.dat' % bn, 'w')
                for i in range(N_om):
                    f.write("%s    %s\n" % (Mesh[i], DOS[bn][i]))
                f.close()

                for ish in range(self.N_inequiv_corr_shells):
                    f = open('DOS%s_proj%s.dat' % (bn, ish), 'w')
                    for i in range(N_om):
                        f.write("%s    %s\n" % (Mesh[i], DOSproj[ish][bn][i]))
                    f.close()

                    dim = self.corr_shells[self.invshellmap[ish]][3]
                    for i in range(dim):
                        for j in range(i, dim):
                            Fname = 'DOS%s_proj%s_%s_%s.dat' % (bn, ish, i, j)
                            f = open(Fname, 'w')
                            for iom in range(N_om):
                                f.write("%s    %s\n" % (Mesh[iom], DOSproj_orb[ish][bn][i, j, iom]))
                            f.close()
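A minimal usage sketch for check_inputDOS; the constructor call and all argument values below are assumptions for illustration only.

# Hypothetical driver script (constructor signature and values are assumptions):
SK = SumK_LDA(HDFfile='case.h5')
# Tabulate the input DOS on 1001 real-frequency points in [-5, 5] around the chemical
# potential; on the master node this writes DOS<block>.dat and the projected DOS files.
SK.check_inputDOS(ommin=-5.0, ommax=5.0, N_om=1001, Beta=40, broadening=0.02)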
Example #49
    def run(self):
        """
        Distributes the work items over the MPI nodes (master/slave scheme) and
        collects the results with Treate().
        """
        MPI.barrier()
        if MPI.size==1 : # single machine. Avoid the fork
            while not(self.Finished()):
                n = self.Next()
                if n is not None :
                    self.Treate(self.The_Function(n),0)
            return

        # Code for multiprocessor machines
        RequestList,pid = [],0   # the pid of the child on the master
        node_running,node_stopped= MPI.size*[False],MPI.size*[False]

        if MPI.rank==0 :
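          # Master: hand out work items to the slave nodes via non-blocking receives,
          # and run one item at a time itself in a forked child so this core is not idle.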
          while not(self.Finished()) or pid or any(node_running) :
              # Process the receive requests that have completed
              def keep_request(r) :
                  T = r.test()
                  if T is None : return True            # still pending: keep it in the list
                  value, status = T
                  if value is not None : self.Treate(*value)
                  node_running[status.source] = False   # that node is free again
                  return False
              RequestList = filter(keep_request,RequestList)
              # send new calculation to the nodes or "stop" them
              for node in [ n for n in range(1,MPI.size) if not(node_running[n] or node_stopped[n]) ] :
                  #open('tmp','a').write("master : comm to node %d %s\n"%(node,self.Finished()))
                  MPI.send(self.Finished(),node)
                  if not(self.Finished()) :
                      MPI.send(self.Next(),node) # send the data for the computation
                      node_running[node] = True
                      RequestList.append(MPI.irecv(node)) #Post the receive
                  else :
                      node_stopped[node] = True

              # Check whether the child process on the master has finished.
              if not(pid) or os.waitpid(pid,os.WNOHANG)[0] : # waitpid returns (0, 0), i.e. pid 0, while the child is still running
                  if pid :
                      RR = cPickle.load(open("res_master",'rb'))
                      if RR is not None : self.Treate(*RR)
                  if not(self.Finished()) :
                      currently_calculated_by_master = self.Next() # fetched before the fork, so the child simply inherits it
                      pid = os.fork()
                      if pid==0 :  # we are in the child
                          if currently_calculated_by_master is not None :
                              res = self.The_Function(currently_calculated_by_master)
                          else:
                              res = None
                          cPickle.dump((res,MPI.rank),open('res_master','wb'))
                          os._exit(0) # Cf python doc. Used for the child only.
                  else : pid=0
              if (pid): time.sleep(self.SleepTime) # so that most of the time goes to the actual calculation on the master

        else : # not master
            while not(MPI.recv(0)) :  # master will first send a Finished flag
                omega = MPI.recv(0)
                if omega is None :
                    res = None
                else :
                    res = self.The_Function(omega)
                MPI.send((res,MPI.rank),0)
                
        MPI.barrier()
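The loop above only relies on the object providing Finished(), Next(), The_Function() and Treate(), plus the SleepTime attribute. A minimal sketch of such a class follows; the base-class name Distribution_On_Nodes (standing for the class defining run() above) and the example computation are assumptions.

# Hypothetical work distributor; only the protocol used by run() above is implemented.
class SquareDistributor(Distribution_On_Nodes):   # base-class name is an assumption
    def __init__(self, items):
        self.todo = list(items)                   # work items still to be handed out
        self.results = {}                         # results collected on the master
        self.SleepTime = 1                        # polling interval used by run()
    def Finished(self):
        return len(self.todo) == 0
    def Next(self):
        return self.todo.pop(0) if self.todo else None
    def The_Function(self, x):
        return (x, x * x)                         # the actual computation, run on any node
    def Treate(self, result, node):
        if result is not None:
            key, val = result
            self.results[key] = val

d = SquareDistributor(range(100))
d.run()                                           # master/slave loop defined above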
Example #50
    def read_Symmetry_input(self, orbits, symmfile, SymmSubGrp, SO, SP):
        """
        Reads input for the symmetrisations from symmfile, which is case.sympar or case.symqmc.
        """

        if not (MPI.IS_MASTER_NODE()): return

        MPI.report("Reading symmetry input from %s..." % symmfile)

        N_orbits = len(orbits)
        R = Read_Fortran_File(symmfile)

        try:
            Ns = int(R.next())  # Number of symmetry operations
            Natoms = int(R.next())  # number of atoms involved
            perm = [[int(R.next()) for i in xrange(Natoms)]
                    for j in xrange(Ns)]  # list of permutations of the atoms
            if SP:
                timeinv = [int(R.next()) for j in xrange(Ns)]  # time inversion for SO coupling
            else:
                timeinv = [0 for j in xrange(Ns)]

            # Now read matrices:
            mat = []
            for iNs in xrange(Ns):

                mat.append([
                    numpy.zeros([orbits[orb][3], orbits[orb][3]],
                                numpy.complex_) for orb in xrange(N_orbits)
                ])
                for orb in range(N_orbits):
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat[iNs][orb][i, j] = R.next()  # real part
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat[iNs][orb][i, j] += 1j * R.next()  # imaginary part

            # determine the inequivalent shells:
            # TODO: this workaround should eventually be removed and done for all orbitals:
            #self.inequiv_shells(orbits)
            mat_tinv = [
                numpy.identity(orbits[orb][3], numpy.complex_)
                for orb in range(N_orbits)
            ]

            if ((SO == 0) and (SP == 0)):
                # here we need an additional time inversion operation, so read it:
                for orb in range(N_orbits):
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat_tinv[orb][i, j] = R.next()  # real part
                    for i in xrange(orbits[orb][3]):
                        for j in xrange(orbits[orb][3]):
                            mat_tinv[orb][i, j] += 1j * R.next()  # imaginary part

        except StopIteration:  # a more explicit error if the file is corrupted.
            raise "Symmetry : reading file failed!"

        R.close()

        # Save it to the HDF:
        ar = HDF_Archive(self.HDFfile, 'a')
        if not (SymmSubGrp in ar): ar.create_group(SymmSubGrp)
        thingstowrite = [
            'Ns', 'Natoms', 'perm', 'orbits', 'SO', 'SP', 'timeinv', 'mat',
            'mat_tinv'
        ]
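        # Write each of these local variables under its own name into the SymmSubGrp subgroup: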
        for it in thingstowrite:
            exec "ar['%s']['%s'] = %s" % (SymmSubGrp, it, it)
        del ar
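Finally, a minimal call sketch for read_Symmetry_input; the converter-like object conv and all argument values below are assumptions for illustration (the docstring above names case.sympar / case.symqmc as the expected symmetry files).

# Hypothetical call from a converter-like object (all names and values are assumptions):
conv.read_Symmetry_input(orbits=conv.corr_shells,   # list of correlated shells read earlier
                         symmfile='case.symqmc',    # symmetry input file
                         SymmSubGrp='SymmCorr',     # HDF subgroup to store the matrices in
                         SO=0, SP=0)                # no spin-orbit, no spin polarisation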