def __init__(self, S, build_master_only=True):
    """ Construction with named arguments only. Possible constructors from source S:
          - Parameters(d) : d is a dict to be copied (no deepcopy, just updated).
          - Parameters(f) : f is a string containing the path of a file.
            The type of the file is determined from the extension:
              - .py, .txt : the file is executed in the Parameters
              - .xml      : to be written
        mpi : if build_master_only is true, the construction is done on the master only and
        the result is then broadcast to all the nodes. Otherwise, it is done on all nodes
        (not recommended for reading files).
    """
    if mpi.is_master_node() or not build_master_only:
        if type(S) == type(''):
            # detect the type of the file
            try:
                extension = S.split('.')[-1].lower()
            except IndexError:
                raise ValueError, "I am lost : I can not determine the extension of the file !"
            if extension in ['py', 'txt']:
                execfile(S, {}, self)
            else:
                raise ValueError, "Extension of the file not recognized"
        else:
            # S is therefore a dict
            try:
                self.update(S)
            except:
                print "Error in Parameters constructor. Is the source an iterable ?"
                raise
    # end of master only
    if build_master_only:
        mpi.bcast(self)   # bcast it to the nodes
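# Minimal usage sketch (assuming the class is exported as `Parameters`; 'params.py' is a
# hypothetical file assigning plain names such as `U = 4.0`):
#
#   p = Parameters({'U': 4.0, 'beta': 40.0})   # from a dict, updated in place
#   p = Parameters('params.py')                # from a file, executed on the master
#                                              # and broadcast to all MPI nodes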
def __should_continue(self, n_iter_max):
    """ Stop test for the loop. """
    should_continue = True
    if mpi.is_master_node():
        if (self.Iteration_Number > n_iter_max):
            should_continue = False
    should_continue = mpi.bcast(should_continue)
    return should_continue
def save(self):
    """Saves some quantities into the HDF5 archive."""
    if not (mpi.is_master_node()): return   # do nothing on nodes
    ar = HDFArchive(self.hdf_file, 'a')
    ar[self.lda_data]['chemical_potential'] = self.chemical_potential
    ar[self.lda_data]['dc_energ'] = self.dc_energ
    ar[self.lda_data]['dc_imp'] = self.dc_imp
    del ar
def __repack(self):
    """Calls the h5repack routine in order to reduce the file size of the HDF5 archive.
       Should only be used BEFORE the first invocation of HDFArchive in the program,
       otherwise the HDF5 linking is broken!"""
    import subprocess
    if not (mpi.is_master_node()): return
    mpi.report("Repacking the file %s" % self.hdf_file)
    retcode = subprocess.call(["h5repack", "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
    if (retcode != 0):
        mpi.report("h5repack failed!")
    else:
        subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.hdf_file])
def read_input_from_hdf(self, subgrp, things_to_read, optional_things=[]):
    """ Reads data from the HDF file. Returns True if all mandatory quantities were found,
        False otherwise. If optional_things are given and the mandatory read succeeded,
        a dict of found/not-found flags is returned instead. """

    retval = True
    # init variables on all nodes:
    for it in things_to_read: exec "self.%s = 0" % it
    for it in optional_things: exec "self.%s = 0" % it

    if (mpi.is_master_node()):
        ar = HDFArchive(self.hdf_file, 'a')
        if (subgrp in ar):
            # first read the necessary things:
            for it in things_to_read:
                if (it in ar[subgrp]):
                    exec "self.%s = ar['%s']['%s']" % (it, subgrp, it)
                else:
                    mpi.report("Loading %s failed!" % it)
                    retval = False

            if ((retval) and (len(optional_things) > 0)):
                # if necessary things worked, now read optional things:
                retval = {}
                for it in optional_things:
                    if (it in ar[subgrp]):
                        exec "self.%s = ar['%s']['%s']" % (it, subgrp, it)
                        retval['%s' % it] = True
                    else:
                        retval['%s' % it] = False
        else:
            mpi.report("Loading failed: No %s subgroup in HDF5!" % subgrp)
            retval = False

        del ar

    # now do the broadcasting:
    for it in things_to_read: exec "self.%s = mpi.bcast(self.%s)" % (it, it)
    for it in optional_things: exec "self.%s = mpi.bcast(self.%s)" % (it, it)

    retval = mpi.bcast(retval)

    return retval
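# Usage sketch (quantity names taken from the lists used elsewhere in this class; the
# call itself is only for illustration): mandatory entries must exist in the subgroup,
# optional ones are reported back in a dict of flags.
#
#   found = self.read_input_from_hdf(subgrp=self.lda_data,
#                                    things_to_read=['n_k', 'corr_shells'],
#                                    optional_things=['chemical_potential'])
#   if isinstance(found, dict) and found['chemical_potential']:
#       pass   # the optional quantity was present and is now set on self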
def __init__(self, hdf_file, subgroup=None):
    """Initialises the class.
       Reads the permutations and rotation matrices from the file and constructs the mapping
       for the given orbitals. For each orbit a matrix is read!
       SO: flag for spin-orbit coupled calculations.
       SP: spin polarisation yes/no
    """

    assert type(hdf_file) == StringType, "hdf_file must be a filename"
    self.hdf_file = hdf_file
    thingstoread = ['n_s', 'n_atoms', 'perm', 'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
    for it in thingstoread: exec "self.%s = 0" % it

    if (mpi.is_master_node()):
        # Read the stuff on master:
        ar = HDFArchive(hdf_file, 'a')
        if (subgroup is None):
            ar2 = ar
        else:
            ar2 = ar[subgroup]

        for it in thingstoread: exec "self.%s = ar2['%s']" % (it, it)
        del ar2
        del ar

    # broadcasting
    for it in thingstoread: exec "self.%s = mpi.bcast(self.%s)" % (it, it)

    # now define the mapping of orbitals:
    # self.map[iorb] = jorb gives the permutation of the orbitals as given in the list,
    # when the permutation of the atoms is done:
    self.n_orbits = len(self.orbits)

    self.map = [[0 for iorb in range(self.n_orbits)] for in_s in range(self.n_s)]
    for in_s in range(self.n_s):
        for iorb in range(self.n_orbits):
            srch = copy.deepcopy(self.orbits[iorb])
            srch[0] = self.perm[in_s][self.orbits[iorb][0] - 1]
            self.map[in_s][iorb] = self.orbits.index(srch)
def __init__(self, hdf_file, mu=0.0, h_field=0.0, use_lda_blocks=False, lda_data='SumK_LDA',
             symm_corr_data='SymmCorr', par_proj_data='SumK_LDA_ParProj', symm_par_data='SymmPar',
             bands_data='SumK_LDA_Bands'):
    """ Initialises the class from data previously stored in an HDF5 archive. """

    if not (type(hdf_file) == StringType):
        mpi.report("Give a string for the HDF5 filename to read the input!")
    else:
        self.hdf_file = hdf_file
        self.lda_data = lda_data
        self.par_proj_data = par_proj_data
        self.bands_data = bands_data
        self.symm_par_data = symm_par_data
        self.symm_corr_data = symm_corr_data
        self.block_names = [['up', 'down'], ['ud']]
        self.n_spin_blocks_gf = [2, 1]
        self.Gupf = None
        self.h_field = h_field

        # read input from HDF:
        things_to_read = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
                          'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
                          'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping']
        optional_things = ['gf_struct_solver', 'map_inv', 'map', 'chemical_potential', 'dc_imp', 'dc_energ', 'deg_shells']

        self.retval = self.read_input_from_hdf(subgrp=self.lda_data, things_to_read=things_to_read,
                                               optional_things=optional_things)

        if (self.SO) and (abs(self.h_field) > 0.000001):
            self.h_field = 0.0
            mpi.report("For SO, the external magnetic field is not implemented, setting it to 0!")

        self.inequiv_shells(self.corr_shells)   # determine the number of inequivalent correlated shells

        # field to convert block_names to indices
        self.names_to_ind = [{}, {}]
        for ibl in range(2):
            for inm in range(self.n_spin_blocks_gf[ibl]):
                self.names_to_ind[ibl][self.block_names[ibl][inm]] = inm * self.SP

        # GF structure used for the local quantities in the k sums
        self.gf_struct_corr = [[(al, range(self.corr_shells[i][3])) for al in self.block_names[self.corr_shells[i][4]]]
                               for i in xrange(self.n_corr_shells)]

        if not (self.retval['gf_struct_solver']):
            # No gf_struct was stored in the HDF file, so first set a standard one:
            self.gf_struct_solver = [[(al, range(self.corr_shells[self.invshellmap[i]][3]))
                                      for al in self.block_names[self.corr_shells[self.invshellmap[i]][4]]]
                                     for i in xrange(self.n_inequiv_corr_shells)]
            self.map = [{} for i in xrange(self.n_inequiv_corr_shells)]
            self.map_inv = [{} for i in xrange(self.n_inequiv_corr_shells)]
            for i in xrange(self.n_inequiv_corr_shells):
                for al in self.block_names[self.corr_shells[self.invshellmap[i]][4]]:
                    self.map[i][al] = [al for j in range(self.corr_shells[self.invshellmap[i]][3])]
                    self.map_inv[i][al] = al

        if not (self.retval['dc_imp']):
            # initialise the double counting:
            self.__init_dc()

        if not (self.retval['chemical_potential']):
            self.chemical_potential = mu

        if not (self.retval['deg_shells']):
            self.deg_shells = [[] for i in range(self.n_inequiv_corr_shells)]

        if self.symm_op:
            self.Symm_corr = Symmetry(hdf_file, subgroup=self.symm_corr_data)

        # determine the smallest blocks, if desired:
        if (use_lda_blocks):
            dm = self.analyse_BS()

        # now save things again to HDF5:
        if (mpi.is_master_node()):
            ar = HDFArchive(self.hdf_file, 'a')
            ar[self.lda_data]['h_field'] = self.h_field
            del ar
        self.save()
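# Construction sketch ('mysystem.h5' is a placeholder filename; the archive is assumed to
# already contain the SumK_LDA group written by convert_dmft_input below):
#
#   SK = SumkLDA(hdf_file='mysystem.h5', mu=0.0, use_lda_blocks=True)
#   mpi.report("Starting from mu = %s" % SK.chemical_potential)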
def dos_partial(self, broadening=0.01):
    """Calculates the orbitally-resolved DOS."""

    assert hasattr(self, "Sigma_imp"), "Set Sigma first!"

    retval = self.read_par_proj_input_from_hdf()
    if not retval: return retval
    if self.symm_op: self.Symm_par = Symmetry(self.hdf_file, subgroup=self.symm_par_data)

    mu = self.Chemical_Potential

    GFStruct_proj = [[(al, range(self.shells[i][3])) for al in self.blocnames[self.SO]] for i in xrange(self.N_shells)]
    Gproj = [BlockGf(name_block_generator=[(a, GfReFreq(indices=al, mesh=self.Sigma_imp[0].mesh)) for a, al in GFStruct_proj[ish]],
                     make_copies=False) for ish in xrange(self.N_shells)]
    for ish in range(self.N_shells): Gproj[ish].zero()

    Msh = [x for x in self.Sigma_imp[0].mesh]
    n_om = len(Msh)

    DOS = {}
    for bn in self.blocnames[self.SO]:
        DOS[bn] = numpy.zeros([n_om], numpy.float_)

    DOSproj = [{} for ish in range(self.N_shells)]
    DOSproj_orb = [{} for ish in range(self.N_shells)]
    for ish in range(self.N_shells):
        for bn in self.blocnames[self.SO]:
            dl = self.shells[ish][3]
            DOSproj[ish][bn] = numpy.zeros([n_om], numpy.float_)
            DOSproj_orb[ish][bn] = numpy.zeros([dl, dl, n_om], numpy.float_)

    ikarray = numpy.array(range(self.Nk))

    for ik in mpi.slice_array(ikarray):

        S = self.lattice_gf_realfreq(ik=ik, mu=mu, broadening=broadening)
        S *= self.BZ_weights[ik]

        # non-projected DOS
        for iom in range(n_om):
            for sig, gf in S:
                DOS[sig][iom] += gf._data.array[:, :, iom].imag.trace() / (-3.1415926535)

        # projected DOS:
        for ish in xrange(self.N_shells):
            tmp = Gproj[ish].copy()
            for ir in xrange(self.N_parproj[ish]):
                for sig, gf in tmp: tmp[sig] <<= self.downfold_pc(ik, ir, ish, sig, S[sig], gf)
                Gproj[ish] += tmp

    # collect data from mpi:
    for sig in DOS:
        DOS[sig] = mpi.all_reduce(mpi.world, DOS[sig], lambda x, y: x + y)
    for ish in xrange(self.N_shells):
        Gproj[ish] <<= mpi.all_reduce(mpi.world, Gproj[ish], lambda x, y: x + y)
    mpi.barrier()

    if (self.symm_op != 0): Gproj = self.Symm_par.symmetrize(Gproj)

    # rotation to local coordinate system:
    if (self.use_rotations):
        for ish in xrange(self.N_shells):
            for sig, gf in Gproj[ish]: Gproj[ish][sig] <<= self.rotloc_all(ish, gf, direction='toLocal')

    for ish in range(self.N_shells):
        for sig, gf in Gproj[ish]:
            for iom in range(n_om):
                DOSproj[ish][sig][iom] += gf._data.array[:, :, iom].imag.trace() / (-3.1415926535)
            DOSproj_orb[ish][sig][:, :, :] += gf._data.array[:, :, :].imag / (-3.1415926535)

    if (mpi.is_master_node()):
        # output to files
        for bn in self.blocnames[self.SO]:
            f = open('./DOScorr%s.dat' % bn, 'w')
            for i in range(n_om): f.write("%s %s\n" % (Msh[i], DOS[bn][i]))
            f.close()

            # partial DOS
            for ish in range(self.N_shells):
                f = open('DOScorr%s_proj%s.dat' % (bn, ish), 'w')
                for i in range(n_om): f.write("%s %s\n" % (Msh[i], DOSproj[ish][bn][i]))
                f.close()

                for i in range(self.shells[ish][3]):
                    for j in range(i, self.shells[ish][3]):
                        Fname = './DOScorr' + bn + '_proj' + str(ish) + '_' + str(i) + '_' + str(j) + '.dat'
                        f = open(Fname, 'w')
                        for iom in range(n_om): f.write("%s %s\n" % (Msh[iom], DOSproj_orb[ish][bn][i, j, iom]))
                        f.close()
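# Usage sketch (assumes the real-frequency self energy has already been attached to the
# instance as SK.Sigma_imp, the list of impurity self energies used throughout this class):
#
#   SK.dos_partial(broadening=0.02)   # writes the DOScorr*.dat files on the master node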
def analyse_BS(self, threshold=0.00001, include_shells=None, dm=None):
    """ Determines the Green's function block structure from a simple point integration. """

    if (dm is None): dm = self.simple_point_dens_mat()

    dens_mat = [dm[self.invshellmap[ish]] for ish in xrange(self.n_inequiv_corr_shells)]

    if include_shells is None: include_shells = range(self.n_inequiv_corr_shells)

    for ish in include_shells:

        self.gf_struct_solver[ish] = []

        a_list = [a for a, al in self.gf_struct_corr[self.invshellmap[ish]]]
        for a in a_list:

            dm = dens_mat[ish][a]
            dmbool = (abs(dm) > threshold)   # boolean matrix of entries larger than the threshold

            offdiag = []
            for i in xrange(len(dmbool)):
                for j in xrange(i, len(dmbool)):
                    if ((dmbool[i, j]) & (i != j)): offdiag.append([i, j])

            NBlocs = len(dmbool)
            blocs = [[i] for i in range(NBlocs)]

            for i in range(len(offdiag)):
                if (offdiag[i][0] != offdiag[i][1]):
                    for j in range(len(blocs[offdiag[i][1]])): blocs[offdiag[i][0]].append(blocs[offdiag[i][1]][j])
                    del blocs[offdiag[i][1]]
                    for j in range(i + 1, len(offdiag)):
                        if (offdiag[j][0] == offdiag[i][1]): offdiag[j][0] = offdiag[i][0]
                        if (offdiag[j][1] == offdiag[i][1]): offdiag[j][1] = offdiag[i][0]
                        if (offdiag[j][0] > offdiag[i][1]): offdiag[j][0] -= 1
                        if (offdiag[j][1] > offdiag[i][1]): offdiag[j][1] -= 1
                        offdiag[j].sort()
                    NBlocs -= 1

            for i in range(NBlocs):
                blocs[i].sort()
                self.gf_struct_solver[ish].append(('%s%s' % (a, i), blocs[i]))

            # map is the mapping of the blocks from the SK blocks to the CTQMC blocks:
            self.map[ish][a] = range(len(dmbool))
            for ibl in range(NBlocs):
                for j in range(len(blocs[ibl])):
                    self.map[ish][a][blocs[ibl][j]] = '%s%s' % (a, ibl)
                    self.map_inv[ish]['%s%s' % (a, ibl)] = a

        # now calculate degeneracies of orbitals:
        dm = {}
        for bl in self.gf_struct_solver[ish]:
            bln = bl[0]
            ind = bl[1]
            # get dm for the blocks:
            dm[bln] = numpy.zeros([len(ind), len(ind)], numpy.complex_)
            for i in range(len(ind)):
                for j in range(len(ind)):
                    dm[bln][i, j] = dens_mat[ish][self.map_inv[ish][bln]][ind[i], ind[j]]

        for bl in self.gf_struct_solver[ish]:
            for bl2 in self.gf_struct_solver[ish]:
                if (dm[bl[0]].shape == dm[bl2[0]].shape):
                    if (((abs(dm[bl[0]] - dm[bl2[0]]) < threshold).all()) and (bl[0] != bl2[0])):
                        # check if it was already there:
                        ind1 = -1
                        ind2 = -2
                        for n, ind in enumerate(self.deg_shells[ish]):
                            if (bl[0] in ind): ind1 = n
                            if (bl2[0] in ind): ind2 = n
                        if ((ind1 < 0) and (ind2 >= 0)):
                            self.deg_shells[ish][ind2].append(bl[0])
                        elif ((ind1 >= 0) and (ind2 < 0)):
                            self.deg_shells[ish][ind1].append(bl2[0])
                        elif ((ind1 < 0) and (ind2 < 0)):
                            self.deg_shells[ish].append([bl[0], bl2[0]])

    if (mpi.is_master_node()):
        ar = HDFArchive(self.hdf_file, 'a')
        ar[self.lda_data]['gf_struct_solver'] = self.gf_struct_solver
        ar[self.lda_data]['map'] = self.map
        ar[self.lda_data]['map_inv'] = self.map_inv
        try:
            ar[self.lda_data]['deg_shells'] = self.deg_shells
        except:
            mpi.report("deg_shells not stored, degeneracies not found")
        del ar

    return dens_mat
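# Usage sketch: analyse_BS is normally triggered from __init__ via use_lda_blocks=True,
# but it can also be called directly to re-analyse with a different threshold:
#
#   dens_mat = SK.analyse_BS(threshold=1e-4)
#   mpi.report("Solver block structure: %s" % SK.gf_struct_solver)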
def convert_dmft_input(self):
    """ Reads the input files and stores the data in the HDF5 archive. """

    if not (mpi.is_master_node()): return   # do it only on master
    mpi.report("Reading input from %s..." % self.lda_file)

    # R is a generator : each R.next() returns the next number in the file
    R = read_fortran_file(self.lda_file)
    try:
        energy_unit = R.next()          # read the energy conversion factor
        n_k = int(R.next())             # read the number of k points
        k_dep_projection = 1
        SP = int(R.next())              # flag for spin-polarised calculation
        SO = int(R.next())              # flag for spin-orbit calculation
        charge_below = R.next()         # total charge below the energy window
        density_required = R.next()     # total density required, for setting the chemical potential
        symm_op = 1                     # use symmetry groups for the k-sum

        # the information on the non-correlated shells is not important here, maybe skip:
        n_shells = int(R.next())        # number of shells (e.g. Fe d, As p, O p) in the unit cell,
                                        # corresponds to index R in formulas
        # now read the information about the shells:
        shells = [[int(R.next()) for i in range(4)] for icrsh in range(n_shells)]   # reads iatom, sort, l, dim

        n_corr_shells = int(R.next())   # number of correlated shells (e.g. Fe d, Ce f) in the unit cell,
                                        # corresponds to index R in formulas
        # now read the information about the correlated shells:
        corr_shells = [[int(R.next()) for i in range(6)] for icrsh in range(n_corr_shells)]   # reads iatom, sort, l, dim, SO flag, irep

        self.inequiv_shells(corr_shells)   # determine the number of inequivalent correlated shells,
                                           # has to be known for further reading...

        use_rotations = 1
        rot_mat = [numpy.identity(corr_shells[icrsh][3], numpy.complex_) for icrsh in xrange(n_corr_shells)]

        # read the rotation matrices:
        rot_mat_time_inv = [0 for i in range(n_corr_shells)]
        for icrsh in xrange(n_corr_shells):
            for i in xrange(corr_shells[icrsh][3]):      # read real part:
                for j in xrange(corr_shells[icrsh][3]):
                    rot_mat[icrsh][i, j] = R.next()
            for i in xrange(corr_shells[icrsh][3]):      # read imaginary part:
                for j in xrange(corr_shells[icrsh][3]):
                    rot_mat[icrsh][i, j] += 1j * R.next()

            if (SP == 1):                                # read time inversion flag:
                rot_mat_time_inv[icrsh] = int(R.next())

        # Read here the information for the transformation of the basis:
        n_reps = [1 for i in range(self.n_inequiv_corr_shells)]
        dim_reps = [0 for i in range(self.n_inequiv_corr_shells)]
        T = []
        for icrsh in range(self.n_inequiv_corr_shells):
            n_reps[icrsh] = int(R.next())                                    # number of representatives ("subsets"), e.g. t2g and eg
            dim_reps[icrsh] = [int(R.next()) for i in range(n_reps[icrsh])]  # dimensions of the subsets

            # The transformation matrix:
            # it is of dimension 2l+1 without SO, and 2*(2l+1) with SO!
            ll = 2 * corr_shells[self.invshellmap[icrsh]][2] + 1
            lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1)
            T.append(numpy.zeros([lmax, lmax], numpy.complex_))

            # now read it from file:
            for i in xrange(lmax):
                for j in xrange(lmax):
                    T[icrsh][i, j] = R.next()
            for i in xrange(lmax):
                for j in xrange(lmax):
                    T[icrsh][i, j] += 1j * R.next()

        # Spin blocks to be read:
        n_spin_blocs = SP + 1 - SO   # number of spins to read for Norbs and Ham, NOT projectors

        # read the list of n_orbitals for all k points
        n_orbitals = [[0 for isp in range(n_spin_blocs)] for ik in xrange(n_k)]
        for isp in range(n_spin_blocs):
            for ik in xrange(n_k):
                n_orbitals[ik][isp] = int(R.next())

        # Initialise the projectors:
        proj_mat = [[[numpy.zeros([corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_)
                      for icrsh in range(n_corr_shells)]
                     for isp in range(n_spin_blocs)]
                    for ik in range(n_k)]

        # Read the projectors from the file:
        for ik in xrange(n_k):
            for icrsh in range(n_corr_shells):
                no = corr_shells[icrsh][3]
                # first the real part for BOTH spins, due to conventions in dmftproj:
                for isp in range(n_spin_blocs):
                    for i in xrange(no):
                        for j in xrange(n_orbitals[ik][isp]):
                            proj_mat[ik][isp][icrsh][i, j] = R.next()
                # now the imaginary part:
                for isp in range(n_spin_blocs):
                    for i in xrange(no):
                        for j in xrange(n_orbitals[ik][isp]):
                            proj_mat[ik][isp][icrsh][i, j] += 1j * R.next()

        # now define the arrays for weights and hopping ...
        bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k)   # w(k_index), default normalisation
        hopping = [[numpy.zeros([n_orbitals[ik][isp], n_orbitals[ik][isp]], numpy.complex_)
                    for isp in range(n_spin_blocs)] for ik in xrange(n_k)]

        # weights in the file
        for ik in xrange(n_k): bz_weights[ik] = R.next()

        # if the sum over spins is in the weights, take it out again!
        sm = sum(bz_weights)
        bz_weights[:] /= sm

        # Grab the Hamiltonian
        # we now use the convention of a DIAGONAL Hamiltonian!
        for isp in range(n_spin_blocs):
            for ik in xrange(n_k):
                no = n_orbitals[ik][isp]
                for i in xrange(no):
                    hopping[ik][isp][i, i] = R.next() * energy_unit

        # keep some things that we need for reading parproj:
        self.n_shells = n_shells
        self.shells = shells
        self.n_corr_shells = n_corr_shells
        self.corr_shells = corr_shells
        self.n_spin_blocs = n_spin_blocs
        self.n_orbitals = n_orbitals
        self.n_k = n_k
        self.SO = SO
        self.SP = SP
        self.energy_unit = energy_unit
    except StopIteration:   # a more explicit error if the file is corrupted.
        raise IOError, "SumkLDA : reading file %s failed!" % self.lda_file
    R.close()

    #-----------------------------------------
    # Store the input in the HDF5 archive:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (self.lda_subgrp in ar): ar.create_group(self.lda_subgrp)
    # The subgroup containing the data. If it does not exist, it is created.
    # If it exists, the data is overwritten!

    ar[self.lda_subgrp]['energy_unit'] = energy_unit
    ar[self.lda_subgrp]['n_k'] = n_k
    ar[self.lda_subgrp]['k_dep_projection'] = k_dep_projection
    ar[self.lda_subgrp]['SP'] = SP
    ar[self.lda_subgrp]['SO'] = SO
    ar[self.lda_subgrp]['charge_below'] = charge_below
    ar[self.lda_subgrp]['density_required'] = density_required
    ar[self.lda_subgrp]['symm_op'] = symm_op
    ar[self.lda_subgrp]['n_shells'] = n_shells
    ar[self.lda_subgrp]['shells'] = shells
    ar[self.lda_subgrp]['n_corr_shells'] = n_corr_shells
    ar[self.lda_subgrp]['corr_shells'] = corr_shells
    ar[self.lda_subgrp]['use_rotations'] = use_rotations
    ar[self.lda_subgrp]['rot_mat'] = rot_mat
    ar[self.lda_subgrp]['rot_mat_time_inv'] = rot_mat_time_inv
    ar[self.lda_subgrp]['n_reps'] = n_reps
    ar[self.lda_subgrp]['dim_reps'] = dim_reps
    ar[self.lda_subgrp]['T'] = T
    ar[self.lda_subgrp]['n_orbitals'] = n_orbitals
    ar[self.lda_subgrp]['proj_mat'] = proj_mat
    ar[self.lda_subgrp]['bz_weights'] = bz_weights
    ar[self.lda_subgrp]['hopping'] = hopping
    del ar

    # Symmetries are used, now do the symmetries for the correlated orbitals:
    self.read_symmetry_input(orbits=corr_shells, symm_file=self.symm_file,
                             symm_subgrp=self.symm_subgrp, SO=SO, SP=SP)
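# Conversion sketch: the class around convert_dmft_input is the Wien2k converter (its
# error messages call it Wien2kConverter below); the constructor call and the filename
# stem are hypothetical, only the method calls are taken from this file:
#
#   Converter = Wien2kConverter(filename='mysystem')   # hypothetical constructor call
#   Converter.convert_dmft_input()                     # writes the SumK_LDA group
#   Converter.convert_parproj_input()                  # partial projectors (see below)
#   Converter.convert_bands_input()                    # band/k-path data (see below)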
def convert_bands_input(self, bands_subgrp='SumK_LDA_Bands'):
    """ Converts the input for momentum-resolved spectral functions, and stores it
        in bands_subgrp in the HDF5 archive. """

    if not (mpi.is_master_node()): return

    self.bands_subgrp = bands_subgrp
    mpi.report("Reading bands input from %s..." % self.band_file)

    R = read_fortran_file(self.band_file)
    try:
        n_k = int(R.next())

        # read the list of n_orbitals for all k points
        n_orbitals = [[0 for isp in range(self.n_spin_blocs)] for ik in xrange(n_k)]
        for isp in range(self.n_spin_blocs):
            for ik in xrange(n_k):
                n_orbitals[ik][isp] = int(R.next())

        # Initialise the projectors:
        proj_mat = [[[numpy.zeros([self.corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_)
                      for icrsh in range(self.n_corr_shells)]
                     for isp in range(self.n_spin_blocs)]
                    for ik in range(n_k)]

        # Read the projectors from the file:
        for ik in xrange(n_k):
            for icrsh in range(self.n_corr_shells):
                no = self.corr_shells[icrsh][3]
                # first the real part for BOTH spins, due to conventions in dmftproj:
                for isp in range(self.n_spin_blocs):
                    for i in xrange(no):
                        for j in xrange(n_orbitals[ik][isp]):
                            proj_mat[ik][isp][icrsh][i, j] = R.next()
                # now the imaginary part:
                for isp in range(self.n_spin_blocs):
                    for i in xrange(no):
                        for j in xrange(n_orbitals[ik][isp]):
                            proj_mat[ik][isp][icrsh][i, j] += 1j * R.next()

        hopping = [[numpy.zeros([n_orbitals[ik][isp], n_orbitals[ik][isp]], numpy.complex_)
                    for isp in range(self.n_spin_blocs)] for ik in xrange(n_k)]

        # Grab the Hamiltonian
        # we now use the convention of a DIAGONAL Hamiltonian!
        for isp in range(self.n_spin_blocs):
            for ik in xrange(n_k):
                no = n_orbitals[ik][isp]
                for i in xrange(no):
                    hopping[ik][isp][i, i] = R.next() * self.energy_unit

        # now read the partial projectors:
        n_parproj = [int(R.next()) for i in range(self.n_shells)]

        # Initialise P, here a double list of matrices:
        proj_mat_pc = [[[[numpy.zeros([self.shells[ish][3], n_orbitals[ik][isp]], numpy.complex_)
                          for ir in range(n_parproj[ish])]
                         for ish in range(self.n_shells)]
                        for isp in range(self.n_spin_blocs)]
                       for ik in range(n_k)]

        for ish in range(self.n_shells):
            for ik in xrange(n_k):
                for ir in range(n_parproj[ish]):
                    for isp in range(self.n_spin_blocs):
                        for i in xrange(self.shells[ish][3]):   # read real part:
                            for j in xrange(n_orbitals[ik][isp]):
                                proj_mat_pc[ik][isp][ish][ir][i, j] = R.next()
                        for i in xrange(self.shells[ish][3]):   # read imaginary part:
                            for j in xrange(n_orbitals[ik][isp]):
                                proj_mat_pc[ik][isp][ish][ir][i, j] += 1j * R.next()

    except StopIteration:   # a more explicit error if the file is corrupted.
        raise IOError, "SumkLDA : reading bands file %s failed!" % self.band_file

    R.close()
    # reading done!

    #-----------------------------------------
    # Store the input in the HDF5 archive:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (self.bands_subgrp in ar): ar.create_group(self.bands_subgrp)
    # The subgroup containing the data. If it does not exist, it is created.
    # If it exists, the data is overwritten!

    thingstowrite = ['n_k', 'n_orbitals', 'proj_mat', 'hopping', 'n_parproj', 'proj_mat_pc']
    for it in thingstowrite: exec "ar['%s']['%s'] = %s" % (self.bands_subgrp, it, it)
    del ar
def read_symmetry_input(self, orbits, symm_file, symm_subgrp, SO, SP):
    """ Reads the input for the symmetrisations from symm_file, which is case.sympar or case.symqmc. """

    if not (mpi.is_master_node()): return

    mpi.report("Reading symmetry input from %s..." % symm_file)

    n_orbits = len(orbits)
    R = read_fortran_file(symm_file)

    try:
        n_s = int(R.next())       # number of symmetry operations
        n_atoms = int(R.next())   # number of atoms involved
        perm = [[int(R.next()) for i in xrange(n_atoms)] for j in xrange(n_s)]   # list of permutations of the atoms
        if SP:
            time_inv = [int(R.next()) for j in xrange(n_s)]   # time inversion for spin-orbit coupling
        else:
            time_inv = [0 for j in xrange(n_s)]

        # Now read the matrices:
        mat = []
        for in_s in xrange(n_s):
            mat.append([numpy.zeros([orbits[orb][3], orbits[orb][3]], numpy.complex_) for orb in xrange(n_orbits)])
            for orb in range(n_orbits):
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat[in_s][orb][i, j] = R.next()           # real part
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat[in_s][orb][i, j] += 1j * R.next()     # imaginary part

        mat_tinv = [numpy.identity(orbits[orb][3], numpy.complex_) for orb in range(n_orbits)]

        if ((SO == 0) and (SP == 0)):
            # here we need an additional time inversion operation, so read it:
            for orb in range(n_orbits):
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat_tinv[orb][i, j] = R.next()            # real part
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat_tinv[orb][i, j] += 1j * R.next()      # imaginary part

    except StopIteration:   # a more explicit error if the file is corrupted.
        raise IOError, "Symmetry : reading file %s failed!" % symm_file
    R.close()

    # Save it to the HDF:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (symm_subgrp in ar): ar.create_group(symm_subgrp)
    thingstowrite = ['n_s', 'n_atoms', 'perm', 'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
    for it in thingstowrite: exec "ar['%s']['%s'] = %s" % (symm_subgrp, it, it)
    del ar
def convert_parproj_input(self, par_proj_subgrp='SumK_LDA_ParProj', symm_par_subgrp='SymmPar'):
    """ Reads the input for the partial charge projectors from case.parproj, and stores it
        in the par_proj_subgrp group in the HDF5 archive; the corresponding symmetry data
        goes into symm_par_subgrp. """

    if not (mpi.is_master_node()): return

    self.par_proj_subgrp = par_proj_subgrp
    self.symm_par_subgrp = symm_par_subgrp

    mpi.report("Reading parproj input from %s..." % self.parproj_file)

    Dens_Mat_below = [[numpy.zeros([self.shells[ish][3], self.shells[ish][3]], numpy.complex_)
                       for ish in range(self.n_shells)]
                      for isp in range(self.n_spin_blocs)]

    R = read_fortran_file(self.parproj_file)
    try:
        n_parproj = [int(R.next()) for i in range(self.n_shells)]

        # Initialise P, here a double list of matrices:
        proj_mat_pc = [[[[numpy.zeros([self.shells[ish][3], self.n_orbitals[ik][isp]], numpy.complex_)
                          for ir in range(n_parproj[ish])]
                         for ish in range(self.n_shells)]
                        for isp in range(self.n_spin_blocs)]
                       for ik in range(self.n_k)]

        rot_mat_all = [numpy.identity(self.shells[ish][3], numpy.complex_) for ish in xrange(self.n_shells)]
        rot_mat_all_time_inv = [0 for i in range(self.n_shells)]

        for ish in range(self.n_shells):

            # read first the projectors for this orbital:
            for ik in xrange(self.n_k):
                for ir in range(n_parproj[ish]):
                    for isp in range(self.n_spin_blocs):
                        for i in xrange(self.shells[ish][3]):   # read real part:
                            for j in xrange(self.n_orbitals[ik][isp]):
                                proj_mat_pc[ik][isp][ish][ir][i, j] = R.next()
                    for isp in range(self.n_spin_blocs):
                        for i in xrange(self.shells[ish][3]):   # read imaginary part:
                            for j in xrange(self.n_orbitals[ik][isp]):
                                proj_mat_pc[ik][isp][ish][ir][i, j] += 1j * R.next()

            # now read the density matrix for this orbital below the energy window:
            for isp in range(self.n_spin_blocs):
                for i in xrange(self.shells[ish][3]):           # read real part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i, j] = R.next()
            for isp in range(self.n_spin_blocs):
                for i in xrange(self.shells[ish][3]):           # read imaginary part:
                    for j in xrange(self.shells[ish][3]):
                        Dens_Mat_below[isp][ish][i, j] += 1j * R.next()
                if (self.SP == 0): Dens_Mat_below[isp][ish] /= 2.0

            # Global -> local rotation matrix for this shell:
            for i in xrange(self.shells[ish][3]):               # read real part:
                for j in xrange(self.shells[ish][3]):
                    rot_mat_all[ish][i, j] = R.next()
            for i in xrange(self.shells[ish][3]):               # read imaginary part:
                for j in xrange(self.shells[ish][3]):
                    rot_mat_all[ish][i, j] += 1j * R.next()

            if (self.SP):
                rot_mat_all_time_inv[ish] = int(R.next())

    except StopIteration:   # a more explicit error if the file is corrupted.
        raise IOError, "Wien2kConverter : reading file for Projectors failed!"
    R.close()

    #-----------------------------------------
    # Store the input in the HDF5 archive:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (self.par_proj_subgrp in ar): ar.create_group(self.par_proj_subgrp)
    # The subgroup containing the data. If it does not exist, it is created.
    # If it exists, the data is overwritten!

    thingstowrite = ['Dens_Mat_below', 'n_parproj', 'proj_mat_pc', 'rot_mat_all', 'rot_mat_all_time_inv']
    for it in thingstowrite: exec "ar['%s']['%s'] = %s" % (self.par_proj_subgrp, it, it)
    del ar

    # Symmetries are used, now do the symmetries for all orbitals:
    self.read_symmetry_input(orbits=self.shells, symm_file=self.symmpar_file,
                             symm_subgrp=self.symm_par_subgrp, SO=self.SO, SP=self.SP)
def spaghettis(self, broadening, shift=0.0, plot_range=None, ishell=None, invert_Akw=False, fermi_surface=False):
    """ Calculates the correlated band structure with a real-frequency self energy.
        ATTENTION: Many things from the original input file are overwritten!!!"""

    assert hasattr(self, "Sigma_imp"), "Set Sigma first!"
    thingstoread = ['Nk', 'N_Orbitals', 'Proj_Mat', 'Hopping', 'N_parproj', 'Proj_Mat_pc']
    retval = self.read_input_from_HDF(SubGrp=self.bands_data, thingstoread=thingstoread)
    if not retval: return retval

    if fermi_surface: ishell = None

    # print the Hamiltonian for checks:
    if ((self.SP == 1) and (self.SO == 0)):
        f1 = open('hamup.dat', 'w')
        f2 = open('hamdn.dat', 'w')
        for ik in xrange(self.Nk):
            for i in xrange(self.N_Orbitals[ik][0]):
                f1.write('%s %s\n' % (ik, self.Hopping[ik][0][i, i].real))
            for i in xrange(self.N_Orbitals[ik][1]):
                f2.write('%s %s\n' % (ik, self.Hopping[ik][1][i, i].real))
            f1.write('\n')
            f2.write('\n')
        f1.close()
        f2.close()
    else:
        f = open('ham.dat', 'w')
        for ik in xrange(self.Nk):
            for i in xrange(self.N_Orbitals[ik][0]):
                f.write('%s %s\n' % (ik, self.Hopping[ik][0][i, i].real))
            f.write('\n')
        f.close()

    #=========================================
    # calculate A(k,w):

    mu = self.Chemical_Potential
    bln = self.blocnames[self.SO]

    # init the mesh:
    M = [x for x in self.Sigma_imp[0].mesh]
    n_om = len(M)

    if plot_range is None:
        om_minplot = M[0] - 0.001
        om_maxplot = M[n_om - 1] + 0.001
    else:
        om_minplot = plot_range[0]
        om_maxplot = plot_range[1]

    if (ishell is None):
        Akw = {}
        for ibn in bln: Akw[ibn] = numpy.zeros([self.Nk, n_om], numpy.float_)
    else:
        Akw = {}
        for ibn in bln: Akw[ibn] = numpy.zeros([self.shells[ishell][3], self.Nk, n_om], numpy.float_)

    if fermi_surface:
        om_minplot = -2.0 * broadening
        om_maxplot = 2.0 * broadening
        Akw = {}
        for ibn in bln: Akw[ibn] = numpy.zeros([self.Nk, 1], numpy.float_)

    if not (ishell is None):
        GFStruct_proj = [(al, range(self.shells[ishell][3])) for al in bln]
        Gproj = BlockGf(name_block_generator=[(a, GfReFreq(indices=al, mesh=self.Sigma_imp[0].mesh)) for a, al in GFStruct_proj],
                        make_copies=False)
        Gproj.zero()

    for ik in xrange(self.Nk):

        S = self.lattice_gf_realfreq(ik=ik, mu=mu, broadening=broadening)
        if (ishell is None):
            # non-projected A(k,w)
            for iom in range(n_om):
                if (M[iom] > om_minplot) and (M[iom] < om_maxplot):
                    if fermi_surface:
                        for sig, gf in S: Akw[sig][ik, 0] += gf._data.array[:, :, iom].imag.trace() / (-3.1415926535) * (M[1] - M[0])
                    else:
                        for sig, gf in S: Akw[sig][ik, iom] += gf._data.array[:, :, iom].imag.trace() / (-3.1415926535)
                        Akw[sig][ik, iom] += ik * shift   # shift Akw for plotting in xmgrace

        else:
            # projected A(k,w):
            Gproj.zero()
            tmp = Gproj.copy()
            for ir in xrange(self.N_parproj[ishell]):
                for sig, gf in tmp: tmp[sig] <<= self.downfold_pc(ik, ir, ishell, sig, S[sig], gf)
                Gproj += tmp

            # TO BE FIXED:
            # rotate to local frame
            #if (self.use_rotations):
            #    for sig, gf in Gproj: Gproj[sig] <<= self.rotloc(0, gf, direction='toLocal')

            for iom in range(n_om):
                if (M[iom] > om_minplot) and (M[iom] < om_maxplot):
                    for ish in range(self.shells[ishell][3]):
                        for ibn in bln:
                            Akw[ibn][ish, ik, iom] = Gproj[ibn]._data.array[ish, ish, iom].imag / (-3.1415926535)

    # END k-LOOP
    if (mpi.is_master_node()):
        if (ishell is None):

            for ibn in bln:
                # loop over GF blocks:

                if (invert_Akw):
                    maxAkw = Akw[ibn].max()
                    minAkw = Akw[ibn].min()

                # open file for storage:
                if fermi_surface:
                    f = open('FS_' + ibn + '.dat', 'w')
                else:
                    f = open('Akw_' + ibn + '.dat', 'w')

                for ik in range(self.Nk):
                    if fermi_surface:
                        if (invert_Akw):
                            Akw[ibn][ik, 0] = 1.0 / (minAkw - maxAkw) * (Akw[ibn][ik, 0] - maxAkw)
                        f.write('%s %s\n' % (ik, Akw[ibn][ik, 0]))
                    else:
                        for iom in range(n_om):
                            if (M[iom] > om_minplot) and (M[iom] < om_maxplot):
                                if (invert_Akw):
                                    Akw[ibn][ik, iom] = 1.0 / (minAkw - maxAkw) * (Akw[ibn][ik, iom] - maxAkw)
                                if (shift > 0.0001):
                                    f.write('%s %s\n' % (M[iom], Akw[ibn][ik, iom]))
                                else:
                                    f.write('%s %s %s\n' % (ik, M[iom], Akw[ibn][ik, iom]))
                        f.write('\n')
                f.close()

        else:
            for ibn in bln:
                for ish in range(self.shells[ishell][3]):

                    if (invert_Akw):
                        maxAkw = Akw[ibn][ish, :, :].max()
                        minAkw = Akw[ibn][ish, :, :].min()

                    f = open('Akw_' + ibn + '_proj' + str(ish) + '.dat', 'w')

                    for ik in range(self.Nk):
                        for iom in range(n_om):
                            if (M[iom] > om_minplot) and (M[iom] < om_maxplot):
                                if (invert_Akw):
                                    Akw[ibn][ish, ik, iom] = 1.0 / (minAkw - maxAkw) * (Akw[ibn][ish, ik, iom] - maxAkw)
                                if (shift > 0.0001):
                                    f.write('%s %s\n' % (M[iom], Akw[ibn][ish, ik, iom]))
                                else:
                                    f.write('%s %s %s\n' % (ik, M[iom], Akw[ibn][ish, ik, iom]))
                        f.write('\n')
                    f.close()
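# Usage sketch (again assuming SK.Sigma_imp holds a real-frequency self energy and the
# bands group was written by convert_bands_input; values are illustrative):
#
#   SK.spaghettis(broadening=0.05, plot_range=(-5.0, 5.0), invert_Akw=False)
#   # writes Akw_<block>.dat files, or FS_<block>.dat with fermi_surface=True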
def calc_density_correction(self, filename='dens_mat.dat'):
    """ Calculates the density correction in order to feed it back to the DFT calculation. """

    assert (type(filename) == StringType), "filename has to be a string!"

    ntoi = self.names_to_ind[self.SO]
    bln = self.block_names[self.SO]

    # Set up deltaN:
    deltaN = {}
    for ib in bln:
        deltaN[ib] = [numpy.zeros([self.n_orbitals[ik][ntoi[ib]], self.n_orbitals[ik][ntoi[ib]]], numpy.complex_)
                      for ik in range(self.n_k)]

    ikarray = numpy.array(range(self.n_k))

    dens = {}
    for ib in bln: dens[ib] = 0.0

    for ik in mpi.slice_array(ikarray):

        S = self.lattice_gf_matsubara(ik=ik, mu=self.chemical_potential)
        for sig, g in S:
            deltaN[sig][ik] = S[sig].density()
            dens[sig] += self.bz_weights[ik] * S[sig].total_density()

    # collect data from the MPI nodes:
    for sig in deltaN:
        for ik in range(self.n_k):
            deltaN[sig][ik] = mpi.all_reduce(mpi.world, deltaN[sig][ik], lambda x, y: x + y)
        dens[sig] = mpi.all_reduce(mpi.world, dens[sig], lambda x, y: x + y)
    mpi.barrier()

    # now save to file:
    if (mpi.is_master_node()):
        if (self.SP == 0):
            f = open(filename, 'w')
        else:
            f = open(filename + 'up', 'w')
            f1 = open(filename + 'dn', 'w')

        # write the chemical potential (in Rydberg):
        f.write("%.14f\n" % (self.chemical_potential / self.energy_unit))
        if (self.SP != 0): f1.write("%.14f\n" % (self.chemical_potential / self.energy_unit))
        # write beta (in Rydberg^-1):
        f.write("%.14f\n" % (S.beta * self.energy_unit))
        if (self.SP != 0): f1.write("%.14f\n" % (S.beta * self.energy_unit))

        if (self.SP == 0):
            for ik in range(self.n_k):
                f.write("%s\n" % self.n_orbitals[ik][0])
                for inu in range(self.n_orbitals[ik][0]):
                    for imu in range(self.n_orbitals[ik][0]):
                        valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
                        valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
                        f.write("%.14f %.14f " % (valre, valim))
                    f.write("\n")
                f.write("\n")
            f.close()

        elif ((self.SP == 1) and (self.SO == 0)):
            for ik in range(self.n_k):
                f.write("%s\n" % self.n_orbitals[ik][0])
                for inu in range(self.n_orbitals[ik][0]):
                    for imu in range(self.n_orbitals[ik][0]):
                        f.write("%.14f %.14f " % (deltaN['up'][ik][inu, imu].real, deltaN['up'][ik][inu, imu].imag))
                    f.write("\n")
                f.write("\n")
            f.close()
            for ik in range(self.n_k):
                f1.write("%s\n" % self.n_orbitals[ik][1])
                for inu in range(self.n_orbitals[ik][1]):
                    for imu in range(self.n_orbitals[ik][1]):
                        f1.write("%.14f %.14f " % (deltaN['down'][ik][inu, imu].real, deltaN['down'][ik][inu, imu].imag))
                    f1.write("\n")
                f1.write("\n")
            f1.close()

        else:
            for ik in range(self.n_k):
                f.write("%s\n" % self.n_orbitals[ik][0])
                for inu in range(self.n_orbitals[ik][0]):
                    for imu in range(self.n_orbitals[ik][0]):
                        f.write("%.14f %.14f " % (deltaN['ud'][ik][inu, imu].real, deltaN['ud'][ik][inu, imu].imag))
                    f.write("\n")
                f.write("\n")
            f.close()
            for ik in range(self.n_k):
                f1.write("%s\n" % self.n_orbitals[ik][0])
                for inu in range(self.n_orbitals[ik][0]):
                    for imu in range(self.n_orbitals[ik][0]):
                        f1.write("%.14f %.14f " % (deltaN['ud'][ik][inu, imu].real, deltaN['ud'][ik][inu, imu].imag))
                    f1.write("\n")
                f1.write("\n")
            f1.close()

    return deltaN, dens
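# Usage sketch: typically called after a converged DMFT cycle to write the charge-density
# correction that the dmftproj / Wien2k interface reads back:
#
#   deltaN, dens = SK.calc_density_correction(filename='dens_mat.dat')
#   mpi.report("Density per block: %s" % dens)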
def check_input_dos(self, om_min, om_max, n_om, beta=10, broadening=0.01):
    """ Calculates the non-interacting DOS (no self energy) as a consistency check of the input. """

    delta_om = (om_max - om_min) / (n_om - 1)
    mesh = numpy.zeros([n_om], numpy.float_)

    DOS = {}
    for bn in self.blocnames[self.SO]:
        DOS[bn] = numpy.zeros([n_om], numpy.float_)

    DOSproj = [{} for icrsh in range(self.N_inequiv_corr_shells)]
    DOSproj_orb = [{} for icrsh in range(self.N_inequiv_corr_shells)]
    for icrsh in range(self.N_inequiv_corr_shells):
        for bn in self.blocnames[self.corr_shells[self.invshellmap[icrsh]][4]]:
            dl = self.corr_shells[self.invshellmap[icrsh]][3]
            DOSproj[icrsh][bn] = numpy.zeros([n_om], numpy.float_)
            DOSproj_orb[icrsh][bn] = numpy.zeros([dl, dl, n_om], numpy.float_)

    for i in range(n_om): mesh[i] = om_min + delta_om * i

    # init:
    Gloc = []
    for icrsh in range(self.N_corr_shells):
        b_list = [a for a, al in self.GFStruct_corr[icrsh]]
        glist = lambda: [GfReFreq(indices=al, beta=beta, mesh_array=mesh) for a, al in self.GFStruct_corr[icrsh]]
        Gloc.append(BlockGf(name_list=b_list, block_list=glist(), make_copies=False))
    for icrsh in xrange(self.N_corr_shells): Gloc[icrsh].zero()   # initialise to zero

    for ik in xrange(self.Nk):

        Gupf = self.lattice_gf_realfreq(ik=ik, mu=self.Chemical_Potential, broadening=broadening,
                                        beta=beta, mesh=mesh, with_Sigma=False)
        Gupf *= self.BZ_weights[ik]

        # non-projected DOS
        for iom in range(n_om):
            for sig, gf in Gupf:
                DOS[sig][iom] += gf._data.array[:, :, iom].imag.trace() / (-3.1415926535)

        for icrsh in xrange(self.N_corr_shells):
            tmp = Gloc[icrsh].copy()
            for sig, gf in tmp: tmp[sig] <<= self.downfold(ik, icrsh, sig, Gupf[sig], gf)   # downfold G
            Gloc[icrsh] += tmp

    if (self.symm_op != 0): Gloc = self.Symm_corr.symmetrize(Gloc)

    if (self.use_rotations):
        for icrsh in xrange(self.N_corr_shells):
            for sig, gf in Gloc[icrsh]: Gloc[icrsh][sig] <<= self.rotloc(icrsh, gf, direction='toLocal')

    # Gloc can now also be used to look at orbitally-resolved quantities
    for ish in range(self.N_inequiv_corr_shells):
        for sig, gf in Gloc[self.invshellmap[ish]]:   # loop over spins
            for iom in range(n_om):
                DOSproj[ish][sig][iom] += gf._data.array[:, :, iom].imag.trace() / (-3.1415926535)
            DOSproj_orb[ish][sig][:, :, :] += gf._data.array[:, :, :].imag / (-3.1415926535)

    # output:
    if (mpi.is_master_node()):
        for bn in self.blocnames[self.SO]:
            f = open('DOS%s.dat' % bn, 'w')
            for i in range(n_om): f.write("%s %s\n" % (mesh[i], DOS[bn][i]))
            f.close()

            for ish in range(self.N_inequiv_corr_shells):
                f = open('DOS%s_proj%s.dat' % (bn, ish), 'w')
                for i in range(n_om): f.write("%s %s\n" % (mesh[i], DOSproj[ish][bn][i]))
                f.close()

                for i in range(self.corr_shells[self.invshellmap[ish]][3]):
                    for j in range(i, self.corr_shells[self.invshellmap[ish]][3]):
                        Fname = 'DOS' + bn + '_proj' + str(ish) + '_' + str(i) + '_' + str(j) + '.dat'
                        f = open(Fname, 'w')
                        for iom in range(n_om): f.write("%s %s\n" % (mesh[iom], DOSproj_orb[ish][bn][i, j, iom]))
                        f.close()
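# Usage sketch: a quick sanity check of the converted input, run before any DMFT cycle
# (numerical values are illustrative):
#
#   SK.check_input_dos(om_min=-10.0, om_max=10.0, n_om=400, broadening=0.05)
#   # writes DOS<block>.dat plus the projected DOS<block>_proj*.dat files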
# Init Sigma
for n, B in S.Sigma: B <<= gf_init.Const(2.0)

# Now I write my DMFT loop...
class myloop(DMFTLoopGeneric):

    def Self_Consistency(self):
        S.Transform_SymmetryBasis_toRealSpace(IN=S.Sigma, OUT=Sigma)   # embedding

        # Computes the sum over the BZ and returns the density
        F = lambda mu: SK(mu=mu, Sigma=Sigma, field=None, result=G).total_density() / 4

        if Density_Required:
            self.Chemical_potential = dichotomy.dichotomy(function=F,
                                                          x_init=self.Chemical_potential, y_value=Density_Required,
                                                          precision_on_y=0.01, delta_x=0.5, max_loops=100,
                                                          x_name="Chemical_Potential", y_name="Total Density",
                                                          verbosity=3)[0]
        else:
            mpi.report("Total density = %.3f" % F(self.Chemical_potential))

        S.Transform_RealSpace_to_SymmetryBasis(IN=G, OUT=S.G)   # extraction
        S.G0 = inverse(S.Sigma + inverse(S.G))                  # finally get S.G0

# Construct an instance and run.
myloop(solver_list=S, Chemical_potential=2.0).run(n_loops=1)

# Save the results in an HDF5 archive
if mpi.is_master_node():
    Results = HDFArchive("cdmft_4_sites.output.h5", 'w')
    Results["G"] = S.G