def Dens(mu):
    dens = SK(mu=mu, Sigma=Sigma_lat).total_density()
    if abs(dens.imag) > 1e-20:
        mpi.report("Warning: Imaginary part of density will be ignored ({})".format(str(abs(dens.imag))))
    return dens.real
def Dens(mu):
    dens = 2 * extract_Gloc(mu)['nambu'][:4, :4].total_density()
    if abs(dens.imag) > 1e-20:
        mpi.report("Warning: Imaginary part of density will be ignored ({})".format(str(abs(dens.imag))))
    return dens.real
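# Both Dens(mu) helpers above are root-function candidates for a chemical
# potential search: one looks for the mu at which Dens(mu) equals the target
# filling. A minimal sketch, assuming scipy is available, density_required
# holds the target filling, and the bracket [-10, 10] eV is an illustrative
# guess:
from scipy.optimize import brentq

mu = brentq(lambda mu: Dens(mu) - density_required, -10.0, 10.0, xtol=1e-6)
mpi.report("Chemical potential found: %s" % mu)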
def __call__(self, prop_to_be_diagonal='eal'):
    '''Calculates the diagonalisation.'''

    if prop_to_be_diagonal == 'eal':
        eal = self.SK.eff_atomic_levels()[0]
    elif prop_to_be_diagonal == 'dm':
        eal = self.SK.simple_point_dens_mat()[0]
    else:
        mpi.report("Not a valid quantity to be diagonal! Choices are 'eal' or 'dm'")
        return 0

    if self.SK.SO == 0:
        self.eig, self.w = numpy.linalg.eigh(eal['up'])
        # now calculate the new transformation matrix
        self.T = numpy.dot(self.T.transpose().conjugate(), self.w).conjugate().transpose()
    else:
        self.eig, self.w = numpy.linalg.eigh(eal['ud'])
        # now calculate the new transformation matrix
        self.T = numpy.dot(self.T.transpose().conjugate(), self.w).conjugate().transpose()

    # measure for the 'unity' of the transformation:
    wsqr = sum(abs(self.w.diagonal())**2) / self.w.diagonal().size
    return wsqr
def repack(self):
    """
    Calls the h5repack routine in order to reduce the file size of the hdf5 archive.

    Note
    ----
    Should only be used before the first invocation of HDFArchive in the program,
    otherwise the hdf5 linking will be broken.
    """
    import subprocess

    if not mpi.is_master_node():
        return
    mpi.report("Repacking the file %s" % self.hdf_file)

    retcode = subprocess.call([hdf5_command_path + "/h5repack",
                               "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
    if retcode != 0:
        mpi.report("h5repack failed!")
    else:
        subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.hdf_file])
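# For reference, the repack above is equivalent to the shell pipeline below
# (a sketch, assuming h5repack is found in hdf5_command_path and case.h5 is
# the archive name):
#
#   h5repack -icase.h5 -otemphgfrt.h5 && mv -f temphgfrt.h5 case.h5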
def __init__(self, SK=None, hdf_datafile=None):
    """
    Initialization of the class. There are two ways to do so:

    - existing SumkLDA class: when you have an existing SumkLDA instance
    - from hdf5 archive: when you want to use data from an hdf5 archive

    Giving the class instance overrides giving the string for the hdf5 archive.

    Parameters
    ----------
    SK : class SumkLDA, optional
         Existing instance of SumkLDA class.
    hdf_datafile : string, optional
                   Name of hdf5 archive to be used.
    """
    if SK is None:
        # build our own SK instance
        if hdf_datafile is None:
            mpi.report("trans_basis: give SK instance or HDF filename!")
            return
        Converter = Wien2kConverter(filename=hdf_datafile, repacking=False)
        Converter.convert_dft_input()
        del Converter
        self.SK = SumkDFT(hdf_file=hdf_datafile + ".h5", use_dft_blocks=False)
    else:
        self.SK = SK

    self.T = copy.deepcopy(self.SK.T[0])
    self.w = numpy.identity(self.SK.corr_shells[0]["dim"])
def __init__(self, Beta, Uint, JHund, l, Nmsb=1025, T=None, UseSpinOrbit=False, Verbosity=0):

    Solver.__init__(self, beta=Beta, l=l, n_msb=Nmsb, use_spin_orbit=UseSpinOrbit)
    self.Uint = Uint
    self.JHund = JHund
    self.T = T
    self.Verbosity = Verbosity

    msg = """
**********************************************************************************
Warning: You are using the old constructor for the solver. Beware that this will
be deprecated in future versions. Please check the documentation.
**********************************************************************************
"""
    mpi.report(msg)
def __init__(self, Beta, GFstruct, N_Matsubara_Frequencies=1025, **param):

    Solver.__init__(self, beta=Beta, gf_struct=GFstruct, n_w=N_Matsubara_Frequencies)
    self.params = param
    self.gen_keys = copy.deepcopy(self.__dict__)

    msg = """
**********************************************************************************
Warning: You are using the old constructor for the solver. Beware that this will
be deprecated in future versions. Please check the documentation.
**********************************************************************************
"""
    mpi.report(msg)
def __repack(self):
    """Calls the h5repack routine, in order to reduce the file size of the hdf5 archive.

    Should only be used BEFORE the first invocation of HDFArchive in the program,
    otherwise the hdf5 linking is broken!"""
    import subprocess

    if not mpi.is_master_node():
        return
    mpi.report("Repacking the file %s" % self.hdf_file)

    retcode = subprocess.call(["h5repack", "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
    if retcode != 0:
        mpi.report("h5repack failed!")
    else:
        subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.hdf_file])
def read_input_from_hdf(self, subgrp, things_to_read, optional_things=[]):
    """
    Reads data from the HDF file.
    """
    retval = True

    # init variables on all nodes:
    for it in things_to_read:
        exec "self.%s = 0" % it
    for it in optional_things:
        exec "self.%s = 0" % it

    if mpi.is_master_node():
        ar = HDFArchive(self.hdf_file, 'a')
        if subgrp in ar:
            # first read the necessary things:
            for it in things_to_read:
                if it in ar[subgrp]:
                    exec "self.%s = ar['%s']['%s']" % (it, subgrp, it)
                else:
                    mpi.report("Loading %s failed!" % it)
                    retval = False

            if retval and (len(optional_things) > 0):
                # if necessary things worked, now read optional things:
                retval = {}
                for it in optional_things:
                    if it in ar[subgrp]:
                        exec "self.%s = ar['%s']['%s']" % (it, subgrp, it)
                        retval['%s' % it] = True
                    else:
                        retval['%s' % it] = False
        else:
            mpi.report("Loading failed: No %s subgroup in HDF5!" % subgrp)
            retval = False
        del ar

    # now do the broadcasting:
    for it in things_to_read:
        exec "self.%s = mpi.bcast(self.%s)" % (it, it)
    for it in optional_things:
        exec "self.%s = mpi.bcast(self.%s)" % (it, it)

    retval = mpi.bcast(retval)
    return retval
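# A minimal usage sketch for read_input_from_hdf, assuming SK is an instance
# of the class above and the archive holds a 'dft_input' subgroup; the array
# names are placeholders for whatever that subgroup actually contains:
found_optional = SK.read_input_from_hdf(subgrp='dft_input',
                                        things_to_read=['n_k', 'n_orbitals'],
                                        optional_things=['bz_weights'])
if isinstance(found_optional, dict) and not found_optional['bz_weights']:
    mpi.report("bz_weights not in archive, falling back to uniform weights")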
def fit_tails(self):
    """Fits the tails using the constant value for the Re Sigma calculated from F=Sigma*G.
    Works only for blocks of size one."""

    if self.blocssizeone:
        spinblocs = [v for v in self.map]
        mpi.report("Fitting tails manually")

        known_coeff = numpy.zeros([1, 1, 2], numpy.float_)
        msh = [x.imag for x in self.G[self.map[spinblocs[0]][0]].mesh]
        fit_start = msh[self.fitting_Frequency_Start]
        fit_stop = msh[self.N_Frequencies_Accumulated]

        # Fit the tail of G just to get the density
        for n, g in self.G:
            g.fitTail([[[0, 0, 1]]], 7, fit_start, 2 * fit_stop)
        densmat = self.G.density()

        for sig1 in spinblocs:
            for i in range(self.n_orb):
                coeff = 0.0
                for sig2 in spinblocs:
                    for j in range(self.n_orb):
                        if sig1 == sig2:
                            coeff += self.U[self.offset + i, self.offset + j] * densmat[self.map[sig1][j]][0, 0].real
                        else:
                            coeff += self.Up[self.offset + i, self.offset + j] * densmat[self.map[sig2][j]][0, 0].real
                known_coeff[0, 0, 1] = coeff
                self.Sigma[self.map[sig1][i]].fitTail(fixed_coef=known_coeff, order_max=3,
                                                      fit_start=fit_start, fit_stop=fit_stop)
    else:
        for n, sig in self.Sigma:
            known_coeff = numpy.zeros([sig.N1, sig.N2, 1], numpy.float_)
            msh = [x.imag for x in sig.mesh]
            fit_start = msh[self.fitting_Frequency_Start]
            fit_stop = msh[self.N_Frequencies_Accumulated]
            sig.fitTail(fixed_coef=known_coeff, order_max=3, fit_start=fit_start, fit_stop=fit_stop)
def calculate_diagonalisation_matrix(self, prop_to_be_diagonal='eal'):
    """
    Calculates the diagonalisation matrix w, and stores it as member of the class.

    Parameters
    ----------
    prop_to_be_diagonal : string, optional
                          Defines the property to be diagonalized.

                          - 'eal' : local hamiltonian (i.e. crystal field)
                          - 'dm' : local density matrix

    Returns
    -------
    wsqr : double
           Measure for the degree of rotation done by the diagonalisation.
           wsqr=1 means no rotation.
    """
    if prop_to_be_diagonal == 'eal':
        prop = self.SK.eff_atomic_levels()[0]
    elif prop_to_be_diagonal == 'dm':
        prop = self.SK.density_matrix(method='using_point_integration')[0]
    else:
        mpi.report("trans_basis: not a valid quantity to be diagonal. Choices are 'eal' or 'dm'.")
        return 0

    if self.SK.SO == 0:
        self.eig, self.w = numpy.linalg.eigh(prop['up'])
        # calculate new transformation matrix
        self.T = numpy.dot(self.T.transpose().conjugate(), self.w).conjugate().transpose()
    else:
        self.eig, self.w = numpy.linalg.eigh(prop['ud'])
        # calculate new transformation matrix
        self.T = numpy.dot(self.T.transpose().conjugate(), self.w).conjugate().transpose()

    # measure for the 'unity' of the transformation:
    wsqr = sum(abs(self.w.diagonal())**2) / self.w.diagonal().size
    return wsqr
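# A minimal usage sketch, assuming TransBasis is the class holding the method
# above and 'case' is the name of a converted calculation (both placeholders):
TB = TransBasis(hdf_datafile='case')
wsqr = TB.calculate_diagonalisation_matrix(prop_to_be_diagonal='eal')
mpi.report('wsqr = %s (wsqr = 1 would mean no rotation at all)' % wsqr)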
def __init__(self, SK=None, hdf_datafile=None):
    '''Inits the class by reading the input.'''

    if SK is None:
        # build our own SK instance
        if hdf_datafile is None:
            mpi.report("Give SK instance or HDF filename!")
            return
        Converter = Wien2kConverter(filename=hdf_datafile, repacking=False)
        Converter.convert_dmft_input()
        del Converter
        self.SK = SumkLDA(hdf_file=hdf_datafile + '.h5', use_lda_blocks=False)
    else:
        self.SK = SK

    self.T = copy.deepcopy(self.SK.T[0])
    self.w = numpy.identity(self.SK.corr_shells[0][3])
def solve_self_consistent_dmft(p):

    ps = []
    for dmft_iter in xrange(p.n_iter):
        mpi.report('--> DMFT Iteration: {:d}'.format(p.iter))
        p = dmft_self_consistent_step(p)
        ps.append(p)
        mpi.report('--> DMFT Convergence: dG_l = {:f}'.format(p.dG_l))
        if p.dG_l < p.G_l_tol:
            break

    if dmft_iter >= p.n_iter - 1:
        mpi.report('--> Warning: DMFT Not converged!')
    else:
        mpi.report('--> DMFT Converged: dG_l = {:f}'.format(p.dG_l))

    return ps
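# A minimal driver sketch for solve_self_consistent_dmft. 'Params' is a
# stand-in attribute container (the real code may use a parameter collection
# class instead); dmft_self_consistent_step is assumed to set p.dG_l and to
# increment p.iter.
class Params(object):
    pass

p = Params()
p.n_iter = 10       # maximum number of DMFT iterations
p.G_l_tol = 1e-5    # convergence tolerance on the Legendre Green's function
p.iter = 0
ps = solve_self_consistent_dmft(p)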
def __init__(self, Beta, Norb, U_interact=None, J_Hund=None, GFStruct=False, map=False,
             use_spinflip=False, useMatrix=True, l=2, T=None, dimreps=None, irep=None,
             deg_orbs=[], Sl_Int=None):

    SolverMultiBand.__init__(self, beta=Beta, n_orb=Norb, gf_struct=GFStruct, map=map)
    self.U_interact = U_interact
    self.J_Hund = J_Hund
    self.use_spinflip = use_spinflip
    self.useMatrix = useMatrix
    self.l = l
    self.T = T
    self.dimreps = dimreps
    self.irep = irep
    self.deg_orbs = deg_orbs
    self.Sl_Int = Sl_Int
    self.gen_keys = copy.deepcopy(self.__dict__)

    msg = """
**********************************************************************************
Warning: You are using the old constructor for the solver. Beware that this will
be deprecated in future versions. Please check the documentation.
**********************************************************************************
"""
    mpi.report(msg)
def transport_distribution(self, beta, directions=['xx'], energy_window=None,
                           Om_mesh=[0.0], with_Sigma=False, n_om=None, broadening=0.0):
    r"""
    Calculates the transport distribution

    .. math::
       \Gamma_{\alpha\beta}\left(\omega+\Omega/2, \omega-\Omega/2\right) = \frac{1}{V} \sum_k Tr\left(v_{k,\alpha}A_{k}(\omega+\Omega/2)v_{k,\beta}A_{k}\left(\omega-\Omega/2\right)\right)

    in the direction :math:`\alpha\beta`. The velocities :math:`v_{k}` are read
    from the transport subgroup of the hdf5 archive.

    Parameters
    ----------
    beta : double
        Inverse temperature :math:`\beta`.
    directions : list of string, optional
        :math:`\alpha\beta`, e.g.: ['xx','yy','zz','xy','xz','yz'].
    energy_window : list of double, optional
        Specifies the upper and lower limit of the frequency integration for
        :math:`\Omega=0.0`. The window is automatically enlarged by the largest
        :math:`\Omega` value, hence the integration is performed in the interval
        [energy_window[0]-max(Om_mesh), energy_window[1]+max(Om_mesh)].
    Om_mesh : list of double, optional
        :math:`\Omega` frequency mesh of the optical conductivity. For the
        conductivity and the Seebeck coefficient :math:`\Omega=0.0` has to be
        part of the mesh. In the current version Om_mesh is pinned to the mesh
        provided by the self-energy! The actual mesh is printed on the screen
        and stored as member Om_mesh.
    with_Sigma : boolean, optional
        Determines whether the calculation is performed with or without self
        energy. If this parameter is set to False the self energy is set to
        zero (i.e. the DFT band structure :math:`A(k,\omega)` is used). Note:
        For with_Sigma=False it is necessary to specify the parameters
        energy_window, n_om and broadening.
    n_om : integer, optional
        Number of equidistant frequency points in the interval
        [energy_window[0]-max(Om_mesh), energy_window[1]+max(Om_mesh)].
        This parameter is only used if with_Sigma = False.
    broadening : double, optional
        Lorentzian broadening. It is necessary to specify the broadening if
        with_Sigma = False, otherwise this parameter can be set to 0.0.
    """

    # Check if the wien converter was called and read the transport subgroup from the hdf file
    if mpi.is_master_node():
        ar = HDFArchive(self.hdf_file, 'r')
        if not (self.transp_data in ar):
            raise IOError, "transport_distribution: No %s subgroup in hdf file found! Call convert_transp_input first." % self.transp_data
        # check if the outputs file was converted
        if not ('n_symmetries' in ar['dft_misc_input']):
            raise IOError, "transport_distribution: n_symmetries missing. Check if case.outputs file is present and call convert_misc_input() or convert_dft_input()."

    self.read_transport_input_from_hdf()

    if mpi.is_master_node():
        # k-dependent-projections.
        assert self.k_dep_projection == 1, "transport_distribution: k dependent projection is not implemented!"
        # positive Om_mesh
        assert all(Om >= 0.0 for Om in Om_mesh), "transport_distribution: Om_mesh should not contain negative values!"

        # Check if energy_window is sufficiently large and correct
        if (energy_window[0] >= energy_window[1] or energy_window[0] >= 0 or energy_window[1] <= 0):
            assert 0, "transport_distribution: energy_window wrong!"

        if (abs(self.fermi_dis(energy_window[0], beta) * self.fermi_dis(-energy_window[0], beta)) > 1e-5
                or abs(self.fermi_dis(energy_window[1], beta) * self.fermi_dis(-energy_window[1], beta)) > 1e-5):
            mpi.report("\n####################################################################")
            mpi.report("transport_distribution: WARNING - energy window might be too narrow!")
            mpi.report("####################################################################\n")

    # up and down are equivalent if SP = 0
    n_inequiv_spin_blocks = self.SP + 1 - self.SO
    self.directions = directions
    dir_to_int = {'x': 0, 'y': 1, 'z': 2}

    # calculate A(k,w)
    #######################################

    # Define mesh for Green's function and in the specified energy window
    if with_Sigma:
        self.omega = numpy.array([round(x.real, 12) for x in self.Sigma_imp_w[0].mesh])
        mesh = None
        mu = self.chemical_potential
        n_om = len(self.omega)
        mpi.report("Using omega mesh provided by Sigma!")

        if energy_window is not None:
            # Find the according window in the Sigma mesh
            ioffset = numpy.sum(self.omega < energy_window[0] - max(Om_mesh))
            self.omega = self.omega[numpy.logical_and(self.omega >= energy_window[0] - max(Om_mesh),
                                                      self.omega <= energy_window[1] + max(Om_mesh))]
            n_om = len(self.omega)

            # Truncate Sigma to the given omega window.
            # In the future there should be an option in gf to manipulate the mesh (e.g. truncate) directly.
            # For now we stick with this:
            for icrsh in range(self.n_corr_shells):
                Sigma_save = self.Sigma_imp_w[icrsh].copy()
                spn = self.spin_block_names[self.corr_shells[icrsh]['SO']]
                glist = lambda: [GfReFreq(indices=inner, window=(self.omega[0], self.omega[-1]), n_points=n_om)
                                 for block, inner in self.gf_struct_sumk[icrsh]]
                self.Sigma_imp_w[icrsh] = BlockGf(name_list=spn, block_list=glist(), make_copies=False)
                for i, g in self.Sigma_imp_w[icrsh]:
                    for iL in g.indices:
                        for iR in g.indices:
                            for iom in xrange(n_om):
                                g.data[iom, iL, iR] = Sigma_save[i].data[ioffset + iom, iL, iR]
    else:
        assert n_om is not None, "transport_distribution: Number of omega points (n_om) needed to calculate transport distribution!"
        assert energy_window is not None, "transport_distribution: Energy window needed to calculate transport distribution!"
        assert broadening != 0.0 and broadening is not None, "transport_distribution: Broadening necessary to calculate transport distribution!"
        self.omega = numpy.linspace(energy_window[0] - max(Om_mesh), energy_window[1] + max(Om_mesh), n_om)
        mesh = [energy_window[0] - max(Om_mesh), energy_window[1] + max(Om_mesh), n_om]
        mu = 0.0

    # Define mesh for optic conductivity
    d_omega = round(numpy.abs(self.omega[0] - self.omega[1]), 12)
    iOm_mesh = numpy.array([round((Om / d_omega), 0) for Om in Om_mesh])
    self.Om_mesh = iOm_mesh * d_omega

    if mpi.is_master_node():
        print "Chemical potential: ", mu
        print "Using n_om = %s points in the energy_window [%s,%s]" % (n_om, self.omega[0], self.omega[-1]),
        print "where the omega vector is:"
        print self.omega
        print "Calculation requested for Omega mesh: ", numpy.array(Om_mesh)
        print "Omega mesh automatically pinned to: ", self.Om_mesh

    self.Gamma_w = {direction: numpy.zeros((len(self.Om_mesh), n_om), dtype=numpy.float_)
                    for direction in self.directions}

    # Sum over all k-points
    ikarray = numpy.array(range(self.n_k))
    for ik in mpi.slice_array(ikarray):
        # Calculate G_w for ik and initialize A_kw
        G_w = self.lattice_gf(ik, mu, iw_or_w="w", beta=beta, broadening=broadening,
                              mesh=mesh, with_Sigma=with_Sigma)
        A_kw = [numpy.zeros((self.n_orbitals[ik][isp], self.n_orbitals[ik][isp], n_om), dtype=numpy.complex_)
                for isp in range(n_inequiv_spin_blocks)]

        for isp in range(n_inequiv_spin_blocks):
            # copy data from G_w (swapaxes is used to have omega in the 3rd dimension)
            A_kw[isp] = copy.deepcopy(G_w[self.spin_block_names[self.SO][isp]].data.swapaxes(0, 1).swapaxes(1, 2))
            # calculate A(k,w) for each frequency
            for iw in xrange(n_om):
                A_kw[isp][:, :, iw] = -1.0 / (2.0 * numpy.pi * 1j) * (
                    A_kw[isp][:, :, iw] - numpy.conjugate(numpy.transpose(A_kw[isp][:, :, iw])))

            b_min = max(self.band_window[isp][ik, 0], self.band_window_optics[isp][ik, 0])
            b_max = min(self.band_window[isp][ik, 1], self.band_window_optics[isp][ik, 1])
            A_i = slice(b_min - self.band_window[isp][ik, 0], b_max - self.band_window[isp][ik, 0] + 1)
            v_i = slice(b_min - self.band_window_optics[isp][ik, 0], b_max - self.band_window_optics[isp][ik, 0] + 1)

            # loop over all symmetries
            for R in self.rot_symmetries:
                # get the transformed velocity under symmetry R
                vel_R = copy.deepcopy(self.velocities_k[isp][ik])
                for nu1 in range(self.band_window_optics[isp][ik, 1] - self.band_window_optics[isp][ik, 0] + 1):
                    for nu2 in range(self.band_window_optics[isp][ik, 1] - self.band_window_optics[isp][ik, 0] + 1):
                        vel_R[nu1][nu2][:] = numpy.dot(R, vel_R[nu1][nu2][:])

                # calculate Gamma_w for each direction from the velocities vel_R and the spectral function A_kw
                for direction in self.directions:
                    for iw in xrange(n_om):
                        for iq in range(len(self.Om_mesh)):
                            if (iw + iOm_mesh[iq] >= n_om
                                    or self.omega[iw] < -self.Om_mesh[iq] + energy_window[0]
                                    or self.omega[iw] > self.Om_mesh[iq] + energy_window[1]):
                                continue
                            self.Gamma_w[direction][iq, iw] += (
                                numpy.dot(numpy.dot(numpy.dot(vel_R[v_i, v_i, dir_to_int[direction[0]]],
                                                              A_kw[isp][A_i, A_i, iw + iOm_mesh[iq]]),
                                                    vel_R[v_i, v_i, dir_to_int[direction[1]]]),
                                          A_kw[isp][A_i, A_i, iw]).trace().real * self.bz_weights[ik])

    for direction in self.directions:
        self.Gamma_w[direction] = (mpi.all_reduce(mpi.world, self.Gamma_w[direction], lambda x, y: x + y)
                                   / self.cellvolume(self.lattice_type, self.lattice_constants, self.lattice_angles)[1]
                                   / self.n_symmetries)
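# A typical call sequence (hedged sketch): with a real-frequency self-energy
# already set in SK, compute the transport distribution and then the static
# conductivity and Seebeck coefficient, assuming the surrounding tools class
# also provides conductivity_and_seebeck; beta and Om_mesh are illustrative.
SK.transport_distribution(beta=40.0, directions=['xx'], with_Sigma=True,
                          Om_mesh=[0.0, 0.1])
SK.conductivity_and_seebeck(beta=40.0)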
n_iw = 1025
n_tau = 10001

p = {}
p["max_time"] = -1
p["random_name"] = ""
p["random_seed"] = 123 * mpi.rank + 567
p["length_cycle"] = 50
p["n_warmup_cycles"] = 50000 / 10
p["n_cycles"] = 3200000 / 10
p["use_norm_as_weight"] = True
p["measure_density_matrix"] = False

results_file_name = "kanamori_offdiag." + ("qn." if use_qn else "") + "h5"

mpi.report("Welcome to Kanamori (off-diagonal) benchmark.")

gf_struct = set_operator_structure(spin_names, orb_names, True)
mkind = get_mkind(True, None)

# Hamiltonian
H = h_int_kanamori(spin_names, orb_names,
                   np.array([[0, U - 3 * J], [U - 3 * J, 0]]),
                   np.array([[U, U - 2 * J], [U - 2 * J, U]]),
                   J, True)

if use_qn:
    QN = [sum([n(*mkind("up", o)) for o in orb_names], Operator()),
          sum([n(*mkind("dn", o)) for o in orb_names], Operator())]
    for o in orb_names:
        dn = n(*mkind("up", o)) - n(*mkind("dn", o))
def solve_lattice_bse(g_wk, gamma_wnn, tail_corr_nwf=None):
    r""" Compute the generalized lattice susceptibility
    :math:`\chi_{abcd}(\omega, \mathbf{k})` using the Bethe-Salpeter equation (BSE).

    Parameters
    ----------
    g_wk : Single-particle Green's function :math:`G_{ab}(\omega, \mathbf{k})`.
    gamma_wnn : Local particle-hole vertex function :math:`\Gamma_{abcd}(\omega, \nu, \nu')`.
    tail_corr_nwf : Number of fermionic frequencies to use in the tail correction
                    of the sum over fermionic frequencies.

    Returns
    -------
    chi_kw : Generalized lattice susceptibility :math:`\chi_{abcd}(\omega, \mathbf{k})`.
    chi0_kw : Generalized bare lattice susceptibility :math:`\chi^0_{abcd}(\omega, \mathbf{k})`.
    """

    fmesh_g = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    bmesh = gamma_wnn.mesh.components[0]
    fmesh = gamma_wnn.mesh.components[1]

    nk = len(kmesh)
    nw = (len(bmesh) + 1) / 2
    nwf = len(fmesh) / 2
    nwf_g = len(fmesh_g) / 2

    if mpi.is_master_node():
        print tprf_banner(), "\n"
        print 'Lattice BSE with local vertex approximation.\n'
        print 'nk =', nk
        print 'nw =', nw
        print 'nwf =', nwf
        print 'nwf_g =', nwf_g
        print 'tail_corr_nwf =', tail_corr_nwf
        print

    if tail_corr_nwf is None:
        tail_corr_nwf = nwf

    mpi.report('--> chi0_wnk_tail_corr')
    chi0_wnk_tail_corr = get_chi0_wnk(g_wk, nw=nw, nwf=tail_corr_nwf)

    mpi.report('--> trace chi0_wnk_tail_corr (WARNING! NO TAIL FIT. FIXME!)')
    chi0_wk_tail_corr = chi0q_sum_nu_tail_corr_PH(chi0_wnk_tail_corr)

    mpi.barrier()
    mpi.report('B1 ' + str(chi0_wk_tail_corr[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    mpi.report('--> chi0_wnk_tail_corr to chi0_wnk')
    if tail_corr_nwf != nwf:
        mpi.report('--> fixed_fermionic_window_python_wnk')
        chi0_wnk = fixed_fermionic_window_python_wnk(chi0_wnk_tail_corr, nwf=nwf)
    else:
        chi0_wnk = chi0_wnk_tail_corr.copy()
    del chi0_wnk_tail_corr

    mpi.barrier()
    mpi.report('C ' + str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    mpi.report('--> trace chi0_wnk')
    chi0_wk = chi0q_sum_nu(chi0_wnk)

    mpi.barrier()
    mpi.report('D ' + str(chi0_wk[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    dchi_wk = chi0_wk_tail_corr - chi0_wk

    chi0_kw = Gf(mesh=MeshProduct(kmesh, bmesh), target_shape=chi0_wk_tail_corr.target_shape)
    chi0_kw.data[:] = chi0_wk_tail_corr.data.swapaxes(0, 1)

    del chi0_wk
    del chi0_wk_tail_corr

    assert chi0_wnk.mesh.components[0] == bmesh
    assert chi0_wnk.mesh.components[1] == fmesh
    assert chi0_wnk.mesh.components[2] == kmesh

    # -- Lattice BSE calc with built in trace
    mpi.report('--> chi_kw from BSE')
    chi_kw = chiq_sum_nu_from_chi0q_and_gamma_PH(chi0_wnk, gamma_wnn)

    mpi.barrier()
    mpi.report('--> chi_kw from BSE (done)')

    del chi0_wnk

    mpi.report('--> chi_kw tail corrected (using chi0_wnk)')
    for k in kmesh:
        chi_kw[k, :] += dchi_wk[:, k]  # -- account for high freq of chi_0 (better than nothing)

    del dchi_wk

    mpi.report('--> solve_lattice_bse, done.')

    return chi_kw, chi0_kw
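# A minimal usage sketch for solve_lattice_bse: g_wk and gamma_wnn are
# placeholders for a lattice Green's function and a local vertex computed
# elsewhere; tail_corr_nwf enlarges the fermionic window used in the tail sum.
chi_kw, chi0_kw = solve_lattice_bse(g_wk, gamma_wnn, tail_corr_nwf=100)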
def imtime_bubble_chi0_wk(g_wk, nw=1):

    wmesh, kmesh = g_wk.mesh.components

    norb = g_wk.target_shape[0]
    beta = wmesh.beta
    nw_g = len(wmesh)
    nk = len(kmesh)
    ntau = 4 * nw_g

    ntot = np.prod(nk) * norb**4 + np.prod(nk) * (nw_g + ntau) * norb**2
    nbytes = ntot * np.complex128().nbytes
    ngb = nbytes / 1024.**3

    if mpi.is_master_node():
        print tprf_banner(), "\n"
        print 'beta =', beta
        print 'nk =', nk
        print 'nw =', nw_g
        print 'norb =', norb
        print
        print 'Approx. Memory Utilization: %2.2f GB\n' % ngb

    mpi.report('--> fourier_wk_to_wr')
    g_wr = fourier_wk_to_wr(g_wk)
    del g_wk

    mpi.report('--> fourier_wr_to_tr')
    g_tr = fourier_wr_to_tr(g_wr)
    del g_wr

    if nw == 1:
        mpi.report('--> chi0_w0r_from_grt_PH (bubble in tau & r)')
        chi0_wr = chi0_w0r_from_grt_PH(g_tr)
        del g_tr
    else:
        mpi.report('--> chi0_tr_from_grt_PH (bubble in tau & r)')
        chi0_tr = chi0_tr_from_grt_PH(g_tr)
        del g_tr

        mpi.report('--> chi_wr_from_chi_tr')
        chi0_wr = chi_wr_from_chi_tr(chi0_tr, nw=nw)
        del chi0_tr

    mpi.report('--> chi_wk_from_chi_wr (r->k)')
    chi0_wk = chi_wk_from_chi_wr(chi0_wr)
    del chi0_wr

    return chi0_wk
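# A minimal usage sketch: the static (single bosonic frequency) bubble only
# needs nw=1, which takes the cheaper chi0_w0r_from_grt_PH branch above;
# g_wk is a placeholder for a lattice Green's function computed elsewhere.
chi00_wk = imtime_bubble_chi0_wk(g_wk, nw=1)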
#!/bin/env pytriqs

import pytriqs.utility.mpi as mpi
from pytriqs.archive import HDFArchive
from triqs_cthyb import SolverCore
from pytriqs.operators import Operator, n
from pytriqs.gf import Gf, inverse, iOmega_n

mpi.report("Welcome to nonint (non-interacting many-band systems) test.")
mpi.report("This test is aimed to reveal excessive state truncation issues.")

beta = 40.0
N_max = 10

n_iw = 1025
n_tau = 10001

p = {}
p["max_time"] = -1
p["random_name"] = ""
p["random_seed"] = 123 * mpi.rank + 567
p["length_cycle"] = 50
p["n_warmup_cycles"] = 50000
p["n_cycles"] = 1200000

for modes in range(1, N_max + 1):
    V = [0.2] * modes
    e = [-0.2] * modes

    gf_struct = [[str(bidx), [0]] for bidx in range(0, len(V))]
def read_config(config_file):
    """
    Reads the config file (default dmft_config.ini). It consists of 2 sections.
    Comments are in general possible with the delimiters ';' or '#'. However,
    this is only possible at the beginning of a line, not within the line!

    Parameters
    ----------

    seedname : str or list of str
               seedname for the h5 archive, or multiple seednames if calculations should be connected
    jobname : str or list of str, optional, default=seedname
              one or multiple jobnames specifying the output directories
    csc : bool, optional, default=False
          are we doing a CSC calculation?
    plo_cfg : str, optional, default='plo.cfg'
              config file for PLOs for the converter
    h_int_type : int
                 interaction type: 1=dens-dens, 2=kanamori, 3=full-slater
    U : float or comma separated list of floats
        U values for impurities; if only one value is given, the same U is assumed for all impurities
    J : float or comma separated list of floats
        J values for impurities; if only one value is given, the same J is assumed for all impurities
    beta : float
           inverse temperature for Greens function etc.
    n_iter_dmft_first : int, optional, default=10
                        number of iterations in the first dmft cycle to converge the dmft solution
    n_iter_dmft_per : int, optional, default=2
                      number of iterations per dmft step in CSC calculations
    n_iter_dmft : int
                  number of iterations per dmft cycle after the first cycle
    n_iter_dft : int, optional, default=8
                 number of dft iterations per cycle
    dc_type : int
              1: Held formula, needs to be used with Slater-Kanamori h_int_type=2
    prec_mu : float
              general precision for determining the chemical potential at any time calc_mu is called
    dc_dmft : bool
              DC with DMFT occupation in each iteration -> True;
              DC with DFT occupations after each DFT cycle -> False
    n_LegCoeff : int, needed if measure_g_l=True
                 number of Legendre coefficients
    h5_save_freq : int, optional, default=5
                   how often the output is saved to the h5 archive
    magnetic : bool, optional, default=False
               are we doing a magnetic calculation? If yes, set magnetic to True
    magmom : list of float separated by comma, optional, default=[]
             initial magnetic moments if magnetic is on. Length must be #imps.
             This will be used as a percentage factor for each imp in the
             initial self energy: positive sign for up (1+fac)*sigma, negative
             sign for the down (1-fac)*sigma channel
    enforce_off_diag : bool, optional, default=False
                       enforce off diagonal elements in the block structure finder
    h_field : float, optional, default=0.0
              magnetic field
    sigma_mix : float, optional, default=1.0
                mixing sigma with the previous iteration's sigma for better convergence; 1.0 means no mixing
    dc : bool, optional, default=True
         dc correction on, yes or no?
    calc_energies : bool, optional, default=True
                    calc energies explicitly within the dmft loop
    block_threshold : float, optional, default=1e-05
                      threshold for finding block structures in the input data (off-diag yes or no)
    spin_names : list of str, optional, default=up,down
                 names for spin channels, usually no need to specify this
    load_sigma : bool, optional, default=False
                 load an old sigma from an h5 file
    path_to_sigma : str, needed if load_sigma is true
                    path to the h5 file from which the sigma should be loaded
    load_sigma_iter : int, optional, default=last iteration
                      load the sigma from a specific iteration if wanted
    occ_conv_crit : float, optional, default=-1
                    stop the calculation if a certain threshold is reached in the last occ_conv_it iterations
    occ_conv_it : int, optional, default=10
                  how many iterations should be considered for convergence?
    sampling_iterations : int, optional, default=0
                          for how many iterations the solution should be sampled after the CSC loop is converged
    fixed_mu_value : float, optional, default -> set fixed_mu = False
                     fixed mu calculations
    store_dft_eigenvals : bool, optional, default=False
                          stores the dft eigenvals from the LOCPROJ file in the h5 archive
    rm_complex : bool, optional, default=False
                 removes the complex parts from G0 before the solver runs
    afm_order : bool, optional, default=False
                copy self energies instead of solving explicitly for afm order
    set_rot : string, optional, default='none'
              use density_mat_dft to diagonalize occupations = 'den';
              use hloc_dft to diagonalize occupations = 'hloc'

    __Solver Parameters:__
    ----------
    length_cycle : int
                   length of each cycle; number of sweeps before a measurement is taken
    n_warmup_cycles : int
                      number of warmup cycles before the real measurement sets in
    n_cycles_tot : int
                   total number of sweeps
    measure_g_l : bool
                  measure the Legendre Greens function
    max_time : int, optional, default=-1
               maximum time the solver is allowed to spend in each iteration
    imag_threshold : float, optional, default=10e-15
                     threshold for the imaginary part of G0_tau. Be warned: if symmetries
                     are off in the projection scheme, imaginary parts can occur in G0_tau
    measure_g_tau : bool, optional, default=True
                    should the solver measure G(tau)?
    move_double : bool, optional, default=True
                  double moves in solver
    perform_tail_fit : bool, optional, default=False
                       tail fitting if Legendre is off?
    fit_max_moment : int, needed if perform_tail_fit=true
                     max moment to be fitted
    fit_min_n : int, needed if perform_tail_fit=true
                index of the Matsubara frequency to start the fit with
    fit_max_n : int, needed if perform_tail_fit=true
                index of the highest Matsubara frequency to fit

    __Returns:__
    general_parameters : dict

    solver_parameters : dict

    """
    config = cp.ConfigParser()
    config.read(config_file)

    solver_parameters = {}
    general_parameters = {}

    # required parameters
    general_parameters['seedname'] = map(str, str(config['general']['seedname'].replace(" ", "")).split(','))
    general_parameters['h_int_type'] = int(config['general']['h_int_type'])
    general_parameters['U'] = map(float, str(config['general']['U']).split(','))
    general_parameters['J'] = map(float, str(config['general']['J']).split(','))
    general_parameters['beta'] = float(config['general']['beta'])
    general_parameters['n_iter_dmft'] = int(config['general']['n_iter_dmft'])
    general_parameters['dc_type'] = int(config['general']['dc_type'])
    general_parameters['prec_mu'] = float(config['general']['prec_mu'])
    general_parameters['dc_dmft'] = config['general'].getboolean('dc_dmft')

    # if csc we need the following input parameters
    if 'csc' in config['general']:
        general_parameters['csc'] = config['general'].getboolean('csc')

        if 'n_iter_dmft_first' in config['general']:
            general_parameters['n_iter_dmft_first'] = int(config['general']['n_iter_dmft_first'])
        else:
            general_parameters['n_iter_dmft_first'] = 10

        if 'n_iter_dmft_per' in config['general']:
            general_parameters['n_iter_dmft_per'] = int(config['general']['n_iter_dmft_per'])
        else:
            general_parameters['n_iter_dmft_per'] = 2

        if 'n_iter_dft' in config['general']:
            general_parameters['n_iter_dft'] = int(config['general']['n_iter_dft'])
        else:
            general_parameters['n_iter_dft'] = 6

        if 'plo_cfg' in config['general']:
            general_parameters['plo_cfg'] = str(config['general']['plo_cfg'])
        else:
            general_parameters['plo_cfg'] = 'plo.cfg'

        if general_parameters['n_iter_dmft'] < general_parameters['n_iter_dmft_first']:
            mpi.report('*** error: total number of iterations should be at least = n_iter_dmft_first ***')
            mpi.MPI.COMM_WORLD.Abort(1)
    else:
        general_parameters['csc'] = False

    # optional stuff
    if 'jobname' in config['general']:
        general_parameters['jobname'] = map(str, str(config['general']['jobname'].replace(" ", "")).split(','))
        if len(general_parameters['jobname']) != len(general_parameters['seedname']):
            mpi.report('*** jobname must have same length as seedname. ***')
            mpi.MPI.COMM_WORLD.Abort(1)
    else:
        general_parameters['jobname'] = general_parameters['seedname']

    if 'h5_save_freq' in config['general']:
        general_parameters['h5_save_freq'] = int(config['general']['h5_save_freq'])
    else:
        general_parameters['h5_save_freq'] = 5

    if 'magnetic' in config['general']:
        general_parameters['magnetic'] = config['general'].getboolean('magnetic')
    else:
        general_parameters['magnetic'] = False

    if 'magmom' in config['general']:
        general_parameters['magmom'] = map(float, str(config['general']['magmom']).split(','))
    else:
        general_parameters['magmom'] = []

    if 'h_field' in config['general']:
        general_parameters['h_field'] = float(config['general']['h_field'])
    else:
        general_parameters['h_field'] = 0.0

    if 'sigma_mix' in config['general']:
        general_parameters['sigma_mix'] = float(config['general']['sigma_mix'])
    else:
        general_parameters['sigma_mix'] = 1.0

    if 'dc' in config['general']:
        general_parameters['dc'] = config['general'].getboolean('dc')
    else:
        general_parameters['dc'] = True

    if 'calc_energies' in config['general']:
        general_parameters['calc_energies'] = config['general'].getboolean('calc_energies')
    else:
        general_parameters['calc_energies'] = True

    if 'block_threshold' in config['general']:
        general_parameters['block_threshold'] = float(config['general']['block_threshold'])
    else:
        general_parameters['block_threshold'] = 1e-05

    if 'enforce_off_diag' in config['general']:
        general_parameters['enforce_off_diag'] = config['general'].getboolean('enforce_off_diag')
    else:
        general_parameters['enforce_off_diag'] = False

    if 'spin_names' in config['general']:
        general_parameters['spin_names'] = map(str, str(config['general']['spin_names']).split(','))
    else:
        general_parameters['spin_names'] = ['up', 'down']

    if 'load_sigma' in config['general']:
        general_parameters['load_sigma'] = config['general'].getboolean('load_sigma')
    else:
        general_parameters['load_sigma'] = False
    if general_parameters['load_sigma'] == True:
        general_parameters['path_to_sigma'] = str(config['general']['path_to_sigma'])
    if 'load_sigma_iter' in config['general']:
        general_parameters['load_sigma_iter'] = int(config['general']['load_sigma_iter'])
    else:
        general_parameters['load_sigma_iter'] = -1

    if 'occ_conv_crit' in config['general']:
        general_parameters['occ_conv_crit'] = float(config['general']['occ_conv_crit'])
    else:
        general_parameters['occ_conv_crit'] = -1
    if 'occ_conv_it' in config['general']:
        general_parameters['occ_conv_it'] = int(config['general']['occ_conv_it'])
    else:
        general_parameters['occ_conv_it'] = 10

    if 'sampling_iterations' in config['general']:
        general_parameters['sampling_iterations'] = int(config['general']['sampling_iterations'])
    else:
        general_parameters['sampling_iterations'] = 0

    if 'fixed_mu_value' in config['general']:
        general_parameters['fixed_mu_value'] = float(config['general']['fixed_mu_value'])
        general_parameters['fixed_mu'] = True
    else:
        general_parameters['fixed_mu'] = False
    if 'dft_mu' in config['general']:
        general_parameters['dft_mu'] = float(config['general']['dft_mu'])
    else:
        general_parameters['dft_mu'] = 0.0

    if 'store_dft_eigenvals' in config['general']:
        general_parameters['store_dft_eigenvals'] = config['general'].getboolean('store_dft_eigenvals')
    else:
        general_parameters['store_dft_eigenvals'] = False

    if 'rm_complex' in config['general']:
        general_parameters['rm_complex'] = config['general'].getboolean('rm_complex')
    else:
        general_parameters['rm_complex'] = False

    if 'afm_order' in config['general']:
        general_parameters['afm_order'] = config['general'].getboolean('afm_order')
    else:
        general_parameters['afm_order'] = False

    if 'set_rot' in config['general']:
        general_parameters['set_rot'] = str(config['general']['set_rot'])
    else:
        general_parameters['set_rot'] = 'none'

    # solver related parameters
    # required parameters
    solver_parameters["length_cycle"] = int(config['solver_parameters']['length_cycle'])
    solver_parameters["n_warmup_cycles"] = int(config['solver_parameters']['n_warmup_cycles'])
    solver_parameters["n_cycles"] = int(float(config['solver_parameters']['n_cycles_tot']) / mpi.size)
    solver_parameters['measure_G_l'] = config['solver_parameters'].getboolean('measure_g_l')

    # optional stuff
    if 'max_time' in config['solver_parameters']:
        solver_parameters["max_time"] = int(config['solver_parameters']['max_time'])
    if 'imag_threshold' in config['solver_parameters']:
        solver_parameters["imag_threshold"] = float(config['solver_parameters']['imag_threshold'])
    if 'measure_g_tau' in config['solver_parameters']:
        solver_parameters["measure_G_tau"] = config['solver_parameters'].getboolean('measure_g_tau')
    else:
        solver_parameters["measure_G_tau"] = True
    if 'move_double' in config['solver_parameters']:
        solver_parameters["move_double"] = config['solver_parameters'].getboolean('move_double')
    else:
        solver_parameters["move_double"] = True

    # tail fitting only if Legendre is off
    if solver_parameters['measure_G_l'] == True:
        # little workaround, since the number of Legendre coefficients is not directly a solver parameter
        general_parameters["n_LegCoeff"] = int(config['solver_parameters']['n_LegCoeff'])
        solver_parameters["perform_tail_fit"] = False
    else:
        if 'perform_tail_fit' in config['solver_parameters']:
            solver_parameters["perform_tail_fit"] = config['solver_parameters'].getboolean('perform_tail_fit')
        else:
            solver_parameters["perform_tail_fit"] = False
        if solver_parameters["perform_tail_fit"] == True:
            solver_parameters["fit_max_moment"] = int(config['solver_parameters']['fit_max_moment'])
            solver_parameters["fit_min_n"] = int(config['solver_parameters']['fit_min_n'])
            solver_parameters["fit_max_n"] = int(config['solver_parameters']['fit_max_n'])

    return general_parameters, solver_parameters
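# A hedged example of a minimal dmft_config.ini covering the required keys
# parsed above (all values are illustrative only):
#
#   [general]
#   seedname = srvo3
#   h_int_type = 2
#   U = 8.0
#   J = 0.65
#   beta = 40.0
#   n_iter_dmft = 10
#   dc_type = 1
#   prec_mu = 0.001
#   dc_dmft = True
#
#   [solver_parameters]
#   length_cycle = 120
#   n_warmup_cycles = 8000
#   n_cycles_tot = 1000000
#   measure_g_l = True
#   n_LegCoeff = 40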
def convert_bands_input(self, bands_subgrp='SumK_LDA_Bands'):
    """
    Converts the input for momentum resolved spectral functions, and stores it
    in bands_subgrp in the HDF5.
    """
    if not mpi.is_master_node():
        return

    self.bands_subgrp = bands_subgrp
    mpi.report("Reading bands input from %s..." % self.band_file)

    R = read_fortran_file(self.band_file)
    try:
        n_k = int(R.next())

        # read the list of n_orbitals for all k points
        n_orbitals = numpy.zeros([n_k, self.n_spin_blocs], numpy.int)
        for isp in range(self.n_spin_blocs):
            for ik in xrange(n_k):
                n_orbitals[ik, isp] = int(R.next())

        # Initialise the projectors:
        proj_mat = numpy.zeros([n_k, self.n_spin_blocs, self.n_corr_shells,
                                max(numpy.array(self.corr_shells)[:, 3]), max(n_orbitals)], numpy.complex_)

        # Read the projectors from the file:
        for ik in xrange(n_k):
            for icrsh in range(self.n_corr_shells):
                no = self.corr_shells[icrsh][3]
                # first Real part for BOTH spins, due to conventions in dmftproj:
                for isp in range(self.n_spin_blocs):
                    for i in xrange(no):
                        for j in xrange(n_orbitals[ik, isp]):
                            proj_mat[ik, isp, icrsh, i, j] = R.next()
                # now Imag part:
                for isp in range(self.n_spin_blocs):
                    for i in xrange(no):
                        for j in xrange(n_orbitals[ik, isp]):
                            proj_mat[ik, isp, icrsh, i, j] += 1j * R.next()

        hopping = numpy.zeros([n_k, self.n_spin_blocs, max(n_orbitals), max(n_orbitals)], numpy.complex_)

        # Grab the H
        # we now use the convention of a DIAGONAL Hamiltonian!
        for isp in range(self.n_spin_blocs):
            for ik in xrange(n_k):
                no = n_orbitals[ik, isp]
                for i in xrange(no):
                    hopping[ik, isp, i, i] = R.next() * self.energy_unit

        # now read the partial projectors:
        n_parproj = [int(R.next()) for i in range(self.n_shells)]
        n_parproj = numpy.array(n_parproj)

        # Initialise P:
        proj_mat_pc = numpy.zeros([n_k, self.n_spin_blocs, self.n_shells, max(n_parproj),
                                   max(numpy.array(self.shells)[:, 3]), max(n_orbitals)], numpy.complex_)

        for ish in range(self.n_shells):
            for ik in xrange(n_k):
                for ir in range(n_parproj[ish]):
                    for isp in range(self.n_spin_blocs):
                        for i in xrange(self.shells[ish][3]):  # read real part:
                            for j in xrange(n_orbitals[ik, isp]):
                                proj_mat_pc[ik, isp, ish, ir, i, j] = R.next()
                        for i in xrange(self.shells[ish][3]):  # read imaginary part:
                            for j in xrange(n_orbitals[ik, isp]):
                                proj_mat_pc[ik, isp, ish, ir, i, j] += 1j * R.next()

    except StopIteration:  # a more explicit error if the file is corrupted.
        raise IOError, "SumkLDA : reading the bands file failed!"

    R.close()
    # reading done!

    #-----------------------------------------
    # Store the input into HDF5:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (self.bands_subgrp in ar):
        ar.create_group(self.bands_subgrp)
    # The subgroup containing the data. If it does not exist, it is created.
    # If it exists, the data is overwritten!
    thingstowrite = ['n_k', 'n_orbitals', 'proj_mat', 'hopping', 'n_parproj', 'proj_mat_pc']
    for it in thingstowrite:
        exec "ar['%s']['%s'] = %s" % (self.bands_subgrp, it, it)
    del ar
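# Hedged read-back sketch: the group written above can be inspected directly
# from the archive ('case.h5' is a placeholder file name):
ar = HDFArchive('case.h5', 'r')
n_k = ar['SumK_LDA_Bands']['n_k']
proj_mat = ar['SumK_LDA_Bands']['proj_mat']
del ar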
def read_symmetry_input(self, orbits, symm_file, symm_subgrp, SO, SP):
    """
    Reads input for the symmetrisations from symm_file, which is case.sympar or case.symqmc.
    """
    if not mpi.is_master_node():
        return

    mpi.report("Reading symmetry input from %s..." % symm_file)

    n_orbits = len(orbits)
    R = read_fortran_file(symm_file)

    try:
        n_s = int(R.next())      # Number of symmetry operations
        n_atoms = int(R.next())  # number of atoms involved
        perm = [[int(R.next()) for i in xrange(n_atoms)] for j in xrange(n_s)]  # list of permutations of the atoms
        if SP:
            time_inv = [int(R.next()) for j in xrange(n_s)]  # time inversion for SO coupling
        else:
            time_inv = [0 for j in xrange(n_s)]

        # Now read matrices:
        mat = []
        for in_s in xrange(n_s):
            mat.append([numpy.zeros([orbits[orb][3], orbits[orb][3]], numpy.complex_) for orb in xrange(n_orbits)])
            for orb in range(n_orbits):
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat[in_s][orb][i, j] = R.next()  # real part
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat[in_s][orb][i, j] += 1j * R.next()  # imaginary part

        mat_tinv = [numpy.identity(orbits[orb][3], numpy.complex_) for orb in range(n_orbits)]

        if (SO == 0) and (SP == 0):
            # here we need an additional time inversion operation, so read it:
            for orb in range(n_orbits):
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat_tinv[orb][i, j] = R.next()  # real part
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat_tinv[orb][i, j] += 1j * R.next()  # imaginary part

    except StopIteration:  # a more explicit error if the file is corrupted.
        raise IOError, "Symmetry : reading file failed!"
    R.close()

    # Save it to the HDF:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (symm_subgrp in ar):
        ar.create_group(symm_subgrp)
    thingstowrite = ['n_s', 'n_atoms', 'perm', 'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
    for it in thingstowrite:
        exec "ar['%s']['%s'] = %s" % (symm_subgrp, it, it)
    del ar
def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter):
    """
    Main CSC loop: waits for the VASP lock file, alternates VASP and DMFT
    cycles, and tells VASP to stop after n_iter iterations.
    """
    mpi.report("  Waiting for VASP lock to appear...")
    while not is_vasp_lock_present():
        time.sleep(1)

    vasp_running = True
    iter = 0
    while vasp_running:
        if debug: print bcolors.RED + "rank %s" % (mpi.rank) + bcolors.ENDC
        mpi.report("  Waiting for VASP lock to disappear...")
        mpi.barrier()
        while is_vasp_lock_present():
            time.sleep(1)
            # if debug: print bcolors.YELLOW + " waiting: rank %s"%(mpi.rank) + bcolors.ENDC
            if not is_vasp_running(vasp_pid):
                mpi.report("  VASP stopped")
                vasp_running = False
                break

        # Tell VASP to stop if the maximum number of iterations is reached
        iter += 1
        if iter == n_iter:
            if mpi.is_master_node():
                print "\n  Maximum number of iterations reached."
                print "  Aborting VASP iterations...\n"
                f_stop = open('STOPCAR', 'wt')
                f_stop.write("LABORT = .TRUE.\n")
                f_stop.close()

        if debug: print bcolors.MAGENTA + "rank %s" % (mpi.rank) + bcolors.ENDC
        err = 0
        exc = None

        if debug: print bcolors.BLUE + "plovasp: rank %s" % (mpi.rank) + bcolors.ENDC
        if mpi.is_master_node():
            converter.generate_and_output_as_text(cfg_file, vasp_dir='./')
            # Read energy from OSZICAR
            dft_energy = get_dft_energy()
        mpi.barrier()

        if debug: print bcolors.GREEN + "rank %s" % (mpi.rank) + bcolors.ENDC
        corr_energy, dft_dc = dmft_cycle()
        mpi.barrier()

        if mpi.is_master_node():
            total_energy = dft_energy + corr_energy - dft_dc
            print
            print "=" * 80
            print "  Total energy: ", total_energy
            print "  DFT energy: ", dft_energy
            print "  Corr. energy: ", corr_energy
            print "  DFT DC: ", dft_dc
            print "=" * 80
            print

        if mpi.is_master_node() and vasp_running:
            open('./vasp.lock', 'a').close()

    if mpi.is_master_node():
        total_energy = dft_energy + corr_energy - dft_dc
        with open('TOTENERGY', 'w') as f:
            f.write("  Total energy: %s\n" % (total_energy))
            f.write("  DFT energy: %s\n" % (dft_energy))
            f.write("  Corr. energy: %s\n" % (corr_energy))
            f.write("  DFT DC: %s\n" % (dft_dc))
            f.write("  Energy correction: %s\n" % (corr_energy - dft_dc))

    mpi.report("***Done")
def convert_parproj_input(self, par_proj_subgrp='SumK_LDA_ParProj', symm_par_subgrp='SymmPar'):
    """
    Reads the input for the partial charges projectors from case.parproj, and
    stores it in the symm_par_subgrp group in the HDF5.
    """
    if not mpi.is_master_node():
        return

    self.par_proj_subgrp = par_proj_subgrp
    self.symm_par_subgrp = symm_par_subgrp

    mpi.report("Reading parproj input from %s..." % self.parproj_file)

    dens_mat_below = [[numpy.zeros([self.shells[ish][3], self.shells[ish][3]], numpy.complex_)
                       for ish in range(self.n_shells)]
                      for isp in range(self.n_spin_blocs)]

    R = read_fortran_file(self.parproj_file)

    n_parproj = [int(R.next()) for i in range(self.n_shells)]
    n_parproj = numpy.array(n_parproj)

    # Initialise P:
    proj_mat_pc = numpy.zeros([self.n_k, self.n_spin_blocs, self.n_shells, max(n_parproj),
                               max(numpy.array(self.shells)[:, 3]), max(self.n_orbitals)], numpy.complex_)

    rot_mat_all = [numpy.identity(self.shells[ish][3], numpy.complex_) for ish in xrange(self.n_shells)]
    rot_mat_all_time_inv = [0 for i in range(self.n_shells)]

    for ish in range(self.n_shells):
        # read first the projectors for this orbital:
        for ik in xrange(self.n_k):
            for ir in range(n_parproj[ish]):
                for isp in range(self.n_spin_blocs):
                    for i in xrange(self.shells[ish][3]):  # read real part:
                        for j in xrange(self.n_orbitals[ik][isp]):
                            proj_mat_pc[ik, isp, ish, ir, i, j] = R.next()
                for isp in range(self.n_spin_blocs):
                    for i in xrange(self.shells[ish][3]):  # read imaginary part:
                        for j in xrange(self.n_orbitals[ik][isp]):
                            proj_mat_pc[ik, isp, ish, ir, i, j] += 1j * R.next()

        # now read the density matrix for this orbital below the energy window:
        for isp in range(self.n_spin_blocs):
            for i in xrange(self.shells[ish][3]):  # read real part:
                for j in xrange(self.shells[ish][3]):
                    dens_mat_below[isp][ish][i, j] = R.next()
        for isp in range(self.n_spin_blocs):
            for i in xrange(self.shells[ish][3]):  # read imaginary part:
                for j in xrange(self.shells[ish][3]):
                    dens_mat_below[isp][ish][i, j] += 1j * R.next()
            if self.SP == 0:
                dens_mat_below[isp][ish] /= 2.0

        # Global -> local rotation matrix for this shell:
        for i in xrange(self.shells[ish][3]):  # read real part:
            for j in xrange(self.shells[ish][3]):
                rot_mat_all[ish][i, j] = R.next()
        for i in xrange(self.shells[ish][3]):  # read imaginary part:
            for j in xrange(self.shells[ish][3]):
                rot_mat_all[ish][i, j] += 1j * R.next()

        if self.SP:
            rot_mat_all_time_inv[ish] = int(R.next())

    R.close()

    #-----------------------------------------
    # Store the input into HDF5:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (self.par_proj_subgrp in ar):
        ar.create_group(self.par_proj_subgrp)
    # The subgroup containing the data. If it does not exist, it is created.
    # If it exists, the data is overwritten!
    thingstowrite = ['dens_mat_below', 'n_parproj', 'proj_mat_pc', 'rot_mat_all', 'rot_mat_all_time_inv']
    for it in thingstowrite:
        exec "ar['%s']['%s'] = %s" % (self.par_proj_subgrp, it, it)
    del ar

    # Symmetries are used, now do the symmetries for all orbitals:
    self.read_symmetry_input(orbits=self.shells, symm_file=self.symmpar_file,
                             symm_subgrp=self.symm_par_subgrp, SO=self.SO, SP=self.SP)
def convert_dmft_input(self, first_real_part_matrix = True, only_upper_triangle = False, weights_in_file = False): """ Reads the input files, and stores the data in the HDFfile """ if not (mpi.is_master_node()): return # do it only on master: mpi.report("Reading input from %s..."%self.lda_file) # Read and write only on Master!!! # R is a generator : each R.Next() will return the next number in the file R = Read_Fortran_File(self.lda_file) try: energy_unit = 1.0 # the energy conversion factor is 1.0, we assume eV in files n_k = int(R.next()) # read the number of k points k_dep_projection = 0 SP = 0 # no spin-polarision SO = 0 # no spin-orbit charge_below = 0.0 # total charge below energy window is set to 0 density_required = R.next() # density required, for setting the chemical potential symm_op = 0 # No symmetry groups for the k-sum # the information on the non-correlated shells is needed for defining dimension of matrices: n_shells = int(R.next()) # number of shells considered in the Wanniers # corresponds to index R in formulas # now read the information about the shells: shells = [ [ int(R.next()) for i in range(4) ] for icrsh in range(n_shells) ] # reads iatom, sort, l, dim n_corr_shells = int(R.next()) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, # corresponds to index R in formulas # now read the information about the shells: corr_shells = [ [ int(R.next()) for i in range(6) ] for icrsh in range(n_corr_shells) ] # reads iatom, sort, l, dim, SO flag, irep self.inequiv_shells(corr_shells) # determine the number of inequivalent correlated shells, has to be known for further reading... use_rotations = 0 rot_mat = [numpy.identity(corr_shells[icrsh][3],numpy.complex_) for icrsh in xrange(n_corr_shells)] rot_mat_time_inv = [0 for i in range(n_corr_shells)] # Representative representations are read from file n_reps = [1 for i in range(self.n_inequiv_corr_shells)] dim_reps = [0 for i in range(self.n_inequiv_corr_shells)] for icrsh in range(self.n_inequiv_corr_shells): n_reps[icrsh] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg dim_reps[icrsh] = [int(R.next()) for i in range(n_reps[icrsh])] # dimensions of the subsets # The transformation matrix: # it is of dimension 2l+1, it is taken to be standard d (as in Wien2k) T = [] for icrsh in range(self.n_inequiv_corr_shells): #for ish in xrange(self.N_inequiv_corr_shells): ll = 2*corr_shells[self.invshellmap[icrsh]][2]+1 lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1) T.append(numpy.zeros([lmax,lmax],numpy.complex_)) T[icrsh] = numpy.array([[0.0, 0.0, 1.0, 0.0, 0.0], [1.0/sqrt(2.0), 0.0, 0.0, 0.0, 1.0/sqrt(2.0)], [-1.0/sqrt(2.0), 0.0, 0.0, 0.0, 1.0/sqrt(2.0)], [0.0, 1.0/sqrt(2.0), 0.0, -1.0/sqrt(2.0), 0.0], [0.0, 1.0/sqrt(2.0), 0.0, 1.0/sqrt(2.0), 0.0]]) # Spin blocks to be read: n_spin_blocks = SP + 1 - SO # number of spins to read for Norbs and Ham, NOT Projectors # define the number of N_Orbitals for all k points: it is the number of total bands and independent of k! 
n_orb = sum([ shells[ish][3] for ish in range(n_shells)]) #n_orbitals = [ [n_orb for isp in range(n_spin_blocks)] for ik in xrange(n_k)] n_orbitals = numpy.ones([n_k,n_spin_blocks],numpy.int) * n_orb #print N_Orbitals # Initialise the projectors: #proj_mat = [ [ [numpy.zeros([corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_) # for icrsh in range (n_corr_shells)] # for isp in range(n_spin_blocks)] # for ik in range(n_k) ] proj_mat = numpy.zeros([n_k,n_spin_blocks,n_corr_shells,max(numpy.array(corr_shells)[:,3]),max(n_orbitals)],numpy.complex_) # Read the projectors from the file: for ik in xrange(n_k): for icrsh in range(n_corr_shells): for isp in range(n_spin_blocks): # calculate the offset: offset = 0 no = 0 for i in range(n_shells): if (no==0): if ((shells[i][0]==corr_shells[icrsh][0]) and (shells[i][1]==corr_shells[icrsh][1])): no = corr_shells[icrsh][3] else: offset += shells[i][3] proj_mat[ik,isp,icrsh,0:no,offset:offset+no] = numpy.identity(no) # now define the arrays for weights and hopping ... bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k) # w(k_index), default normalisation #hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_) # for isp in range(n_spin_blocks)] for ik in xrange(n_k) ] hopping = numpy.zeros([n_k,n_spin_blocks,max(n_orbitals),max(n_orbitals)],numpy.complex_) if (weights_in_file): # weights in the file for ik in xrange(n_k) : bz_weights[ik] = R.next() # if the sum over spins is in the weights, take it out again!! sm = sum(bz_weights) bz_weights[:] /= sm # Grab the H for ik in xrange(n_k) : for isp in range(n_spin_blocks): no = n_orbitals[ik,isp] if (first_real_part_matrix): for i in xrange(no): if (only_upper_triangle): istart = i else: istart = 0 for j in xrange(istart,no): hopping[ik,isp,i,j] = R.next() for i in xrange(no): if (only_upper_triangle): istart = i else: istart = 0 for j in xrange(istart,no): hopping[ik,isp,i,j] += R.next() * 1j if ((only_upper_triangle)and(i!=j)): hopping[ik,isp,j,i] = hopping[ik,isp,i,j].conjugate() else: for i in xrange(no): if (only_upper_triangle): istart = i else: istart = 0 for j in xrange(istart,no): hopping[ik,isp,i,j] = R.next() hopping[ik,isp,i,j] += R.next() * 1j if ((only_upper_triangle)and(i!=j)): hopping[ik,isp,j,i] = hopping[ik,isp,i,j].conjugate() #keep some things that we need for reading parproj: self.n_shells = n_shells self.shells = shells self.n_corr_shells = n_corr_shells self.corr_shells = corr_shells self.n_spin_blocks = n_spin_blocks self.n_orbitals = n_orbitals self.n_k = n_k self.SO = SO self.SP = SP self.energy_unit = energy_unit except StopIteration : # a more explicit error if the file is corrupted. raise "SumK_LDA : reading file HMLT_file failed!" R.close() #print Proj_Mat[0] #----------------------------------------- # Store the input into HDF5: ar = HDFArchive(self.hdf_file,'a') if not (self.lda_subgrp in ar): ar.create_group(self.lda_subgrp) # The subgroup containing the data. If it does not exist, it is created. # If it exists, the data is overwritten!!! 
ar[self.lda_subgrp]['energy_unit'] = energy_unit ar[self.lda_subgrp]['n_k'] = n_k ar[self.lda_subgrp]['k_dep_projection'] = k_dep_projection ar[self.lda_subgrp]['SP'] = SP ar[self.lda_subgrp]['SO'] = SO ar[self.lda_subgrp]['charge_below'] = charge_below ar[self.lda_subgrp]['density_required'] = density_required ar[self.lda_subgrp]['symm_op'] = symm_op ar[self.lda_subgrp]['n_shells'] = n_shells ar[self.lda_subgrp]['shells'] = shells ar[self.lda_subgrp]['n_corr_shells'] = n_corr_shells ar[self.lda_subgrp]['corr_shells'] = corr_shells ar[self.lda_subgrp]['use_rotations'] = use_rotations ar[self.lda_subgrp]['rot_mat'] = rot_mat ar[self.lda_subgrp]['rot_mat_time_inv'] = rot_mat_time_inv ar[self.lda_subgrp]['n_reps'] = n_reps ar[self.lda_subgrp]['dim_reps'] = dim_reps ar[self.lda_subgrp]['T'] = T ar[self.lda_subgrp]['n_orbitals'] = n_orbitals ar[self.lda_subgrp]['proj_mat'] = proj_mat ar[self.lda_subgrp]['bz_weights'] = bz_weights ar[self.lda_subgrp]['hopping'] = hopping del ar
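# Sketch of the projector layout built in the loop above (hypothetical helper, not
# part of the converter): within the full orbital space, the correlated block of a
# given shell sits at an offset equal to the summed dimensions of all preceding
# non-matching shells, and the projector is simply an identity block there.
import numpy

def identity_projector(shells, corr_shell, n_orb_total):
    offset, dim = 0, 0
    for sh in shells:  # sh = [iatom, sort, l, dim]
        if dim == 0:
            if sh[0] == corr_shell[0] and sh[1] == corr_shell[1]:
                dim = corr_shell[3]
            else:
                offset += sh[3]
    P = numpy.zeros((dim, n_orb_total), numpy.complex_)
    P[:, offset:offset + dim] = numpy.identity(dim)
    return P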
def convert_dft_input(self, first_real_part_matrix=True, only_upper_triangle=False, weights_in_file=False): """ Reads the appropriate files and stores the data for the dft_subgrp in the hdf5 archive. Parameters ---------- first_real_part_matrix : boolean, optional Should all the real components for given k be read in first, followed by the imaginary parts? only_upper_triangle : boolean, optional Should only the upper triangular part of H(k) be read in? weights_in_file : boolean, optional Are the k-point weights to be read in? """ # Read and write only on the master node if not (mpi.is_master_node()): return mpi.report("Reading input from %s..." % self.dft_file) # R is a generator : each R.Next() will return the next number in the # file R = ConverterTools.read_fortran_file(self, self.dft_file, self.fortran_to_replace) try: # the energy conversion factor is 1.0, we assume eV in files energy_unit = 1.0 # read the number of k points n_k = int(R.next()) k_dep_projection = 0 SP = 0 # no spin-polarision SO = 0 # no spin-orbit # total charge below energy window is set to 0 charge_below = 0.0 # density required, for setting the chemical potential density_required = R.next() symm_op = 0 # No symmetry groups for the k-sum # the information on the non-correlated shells is needed for # defining dimension of matrices: # number of shells considered in the Wanniers n_shells = int(R.next()) # corresponds to index R in formulas # now read the information about the shells (atom, sort, l, dim): shell_entries = ['atom', 'sort', 'l', 'dim'] shells = [{name: int(val) for name, val in zip(shell_entries, R)} for ish in range(n_shells)] # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, n_corr_shells = int(R.next()) # corresponds to index R in formulas # now read the information about the shells (atom, sort, l, dim, SO # flag, irep): corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep'] corr_shells = [{ name: int(val) for name, val in zip(corr_shell_entries, R) } for icrsh in range(n_corr_shells)] # determine the number of inequivalent correlated shells and maps, # needed for further reading [n_inequiv_shells, corr_to_inequiv, inequiv_to_corr ] = ConverterTools.det_shell_equivalence(self, corr_shells) use_rotations = 0 rot_mat = [ numpy.identity(corr_shells[icrsh]['dim'], numpy.complex_) for icrsh in range(n_corr_shells) ] rot_mat_time_inv = [0 for i in range(n_corr_shells)] # Representative representations are read from file n_reps = [1 for i in range(n_inequiv_shells)] dim_reps = [0 for i in range(n_inequiv_shells)] T = [] for ish in range(n_inequiv_shells): # number of representatives ("subsets"), e.g. t2g and eg n_reps[ish] = int(R.next()) dim_reps[ish] = [int(R.next()) for i in range(n_reps[ish]) ] # dimensions of the subsets # The transformation matrix: # is of dimension 2l+1, it is taken to be standard d (as in # Wien2k) ll = 2 * corr_shells[inequiv_to_corr[ish]]['l'] + 1 lmax = ll * (corr_shells[inequiv_to_corr[ish]]['SO'] + 1) T.append(numpy.zeros([lmax, lmax], numpy.complex_)) T[ish] = numpy.array( [[0.0, 0.0, 1.0, 0.0, 0.0], [1.0 / sqrt(2.0), 0.0, 0.0, 0.0, 1.0 / sqrt(2.0)], [-1.0 / sqrt(2.0), 0.0, 0.0, 0.0, 1.0 / sqrt(2.0)], [0.0, 1.0 / sqrt(2.0), 0.0, -1.0 / sqrt(2.0), 0.0], [0.0, 1.0 / sqrt(2.0), 0.0, 1.0 / sqrt(2.0), 0.0]]) # Spin blocks to be read: # number of spins to read for Norbs and Ham, NOT Projectors n_spin_blocs = SP + 1 - SO # define the number of n_orbitals for all k points: it is the # number of total bands and independent of k! 
n_orbitals = numpy.ones([n_k, n_spin_blocs], numpy.int) * sum( [sh['dim'] for sh in shells]) # Initialise the projectors: proj_mat = numpy.zeros([ n_k, n_spin_blocs, n_corr_shells, max([crsh['dim'] for crsh in corr_shells]), numpy.max(n_orbitals) ], numpy.complex_) # Read the projectors from the file: for ik in range(n_k): for icrsh in range(n_corr_shells): for isp in range(n_spin_blocs): # calculate the offset: offset = 0 n_orb = 0 for ish in range(n_shells): if (n_orb == 0): if (shells[ish]['atom'] == corr_shells[icrsh]['atom']) and ( shells[ish]['sort'] == corr_shells[icrsh]['sort']): n_orb = corr_shells[icrsh]['dim'] else: offset += shells[ish]['dim'] proj_mat[ik, isp, icrsh, 0:n_orb, offset:offset + n_orb] = numpy.identity(n_orb) # now define the arrays for weights and hopping ... # w(k_index), default normalisation bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k) hopping = numpy.zeros([ n_k, n_spin_blocs, numpy.max(n_orbitals), numpy.max(n_orbitals) ], numpy.complex_) if (weights_in_file): # weights in the file for ik in range(n_k): bz_weights[ik] = R.next() # if the sum over spins is in the weights, take it out again!! sm = sum(bz_weights) bz_weights[:] /= sm # Grab the H for isp in range(n_spin_blocs): for ik in range(n_k): n_orb = n_orbitals[ik, isp] # first read all real components for given k, then read # imaginary parts if (first_real_part_matrix): for i in range(n_orb): if (only_upper_triangle): istart = i else: istart = 0 for j in range(istart, n_orb): hopping[ik, isp, i, j] = R.next() for i in range(n_orb): if (only_upper_triangle): istart = i else: istart = 0 for j in range(istart, n_orb): hopping[ik, isp, i, j] += R.next() * 1j if ((only_upper_triangle) and (i != j)): hopping[ik, isp, j, i] = hopping[ik, isp, i, j].conjugate() else: # read (real,im) tuple for i in range(n_orb): if (only_upper_triangle): istart = i else: istart = 0 for j in range(istart, n_orb): hopping[ik, isp, i, j] = R.next() hopping[ik, isp, i, j] += R.next() * 1j if ((only_upper_triangle) and (i != j)): hopping[ik, isp, j, i] = hopping[ik, isp, i, j].conjugate() # keep some things that we need for reading parproj: things_to_set = [ 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'n_spin_blocs', 'n_orbitals', 'n_k', 'SO', 'SP', 'energy_unit' ] for it in things_to_set: setattr(self, it, locals()[it]) except StopIteration: # a more explicit error if the file is corrupted. raise "HK Converter : reading file dft_file failed!" R.close() # Save to the HDF5: with HDFArchive(self.hdf_file, 'a') as ar: if not (self.dft_subgrp in ar): ar.create_group(self.dft_subgrp) things_to_save = [ 'energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required', 'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat', 'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping', 'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr' ] for it in things_to_save: ar[self.dft_subgrp][it] = locals()[it]
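# Stand-alone sketch of the Hermitian read convention used for H(k) above, assuming
# a flat number iterator R (hypothetical helper): with only_upper_triangle=True and
# first_real_part_matrix=True, all real parts of the upper triangle are read first,
# then all imaginary parts, and the lower triangle is completed by conjugation.
import numpy

def read_hermitian_upper(R, n_orb):
    h = numpy.zeros((n_orb, n_orb), numpy.complex_)
    for i in range(n_orb):              # all real parts first ...
        for j in range(i, n_orb):
            h[i, j] = R.next()
    for i in range(n_orb):              # ... then all imaginary parts
        for j in range(i, n_orb):
            h[i, j] += 1j * R.next()
            if i != j:
                h[j, i] = h[i, j].conjugate()
    return h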
print "="*80 print if mpi.is_master_node() and vasp_running: open('./vasp.lock', 'a').close() if mpi.is_master_node(): total_energy = dft_energy + corr_energy - dft_dc with open('TOTENERGY', 'w') as f: f.write(" Total energy: %s\n"%(total_energy)) f.write(" DFT energy: %s\n"%(dft_energy)) f.write(" Corr. energy: %s\n"%(corr_energy)) f.write(" DFT DC: %s\n"%(dft_dc)) f.write(" Energy correction: %s\n"%(corr_energy - dft_dc)) mpi.report("***Done") if __name__ == '__main__': try: vasp_pid = int(sys.argv[1]) except (ValueError, KeyError): if mpi.is_master_node(): print "VASP process pid must be provided as the first argument" raise # if len(sys.argv) > 1: # vasp_path = sys.argv[1] # else: # try: # vasp_path = os.environ['VASP_DIR'] # except KeyError:
def get_chi0_wnk(g_wk, nw=1, nwf=None): fmesh = g_wk.mesh.components[0] kmesh = g_wk.mesh.components[1] if nwf is None: nwf = len(fmesh) / 2 mpi.barrier() mpi.report('g_wk ' + str(g_wk[Idx(2), Idx(0, 1, 2)][0, 0])) n = np.sum(g_wk.data) / len(kmesh) mpi.report('n ' + str(n)) mpi.barrier() mpi.report('--> g_wr from g_wk') g_wr = fourier_wk_to_wr(g_wk) mpi.barrier() mpi.report('g_wr ' + str(g_wr[Idx(2), Idx(0, 1, 2)][0, 0])) n_r = np.sum(g_wr.data, axis=0)[0] mpi.report('n_r=0 ' + str(n_r[0, 0])) mpi.barrier() mpi.report('--> chi0_wnr from g_wr') chi0_wnr = chi0r_from_gr_PH(nw=nw, nnu=nwf, gr=g_wr) #mpi.report('--> chi0_wnr from g_wr (nompi)') #chi0_wnr_nompi = chi0r_from_gr_PH_nompi(nw=nw, nnu=nwf, gr=g_wr) del g_wr #abs_diff = np.abs(chi0_wnr.data - chi0_wnr_nompi.data) #mpi.report('shape = ' + str(abs_diff.shape)) #idx = np.argmax(abs_diff) #mpi.report('argmax = ' + str(idx)) #diff = np.max(abs_diff) #mpi.report('diff = %6.6f' % diff) #del chi0_wnr #chi0_wnr = chi0_wnr_nompi #exit() mpi.barrier() mpi.report('chi0_wnr ' + str(chi0_wnr[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0])) chi0_r0 = np.sum(chi0_wnr[:, :, Idx(0, 0, 0)].data) mpi.report('chi0_r0 ' + str(chi0_r0)) mpi.barrier() mpi.report('--> chi0_wnk from chi0_wnr') chi0_wnk = chi0q_from_chi0r(chi0_wnr) del chi0_wnr mpi.barrier() mpi.report('chi0_wnk ' + str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0])) chi0 = np.sum(chi0_wnk.data) / len(kmesh) mpi.report('chi0 = ' + str(chi0)) mpi.barrier() #if mpi.is_master_node(): if False: from triqs_tprf.ParameterCollection import ParameterCollection p = ParameterCollection() p.g_wk = g_wk p.g_wr = g_wr p.chi0_wnr = chi0_wnr p.chi0_wnk = chi0_wnk print '--> Writing debug info for BSE' with HDFArchive('data_debug_bse.h5', 'w') as arch: arch['p'] = p mpi.barrier() return chi0_wnk
def convert_dmft_input(self): """ Reads the input files, and stores the data in the HDFfile """ if not (mpi.is_master_node()): return # do it only on master: mpi.report("Reading input from %s..."%self.lda_file) # Read and write only on Master!!! # R is a generator : each R.Next() will return the next number in the file R = read_fortran_file(self.lda_file) try: energy_unit = R.next() # read the energy convertion factor n_k = int(R.next()) # read the number of k points k_dep_projection = 1 SP = int(R.next()) # flag for spin-polarised calculation SO = int(R.next()) # flag for spin-orbit calculation charge_below = R.next() # total charge below energy window density_required = R.next() # total density required, for setting the chemical potential symm_op = 1 # Use symmetry groups for the k-sum # the information on the non-correlated shells is not important here, maybe skip: n_shells = int(R.next()) # number of shells (e.g. Fe d, As p, O p) in the unit cell, # corresponds to index R in formulas shells = [ [ int(R.next()) for i in range(4) ] for icrsh in range(n_shells) ] # reads iatom, sort, l, dim #shells = numpy.array(shells) n_corr_shells = int(R.next()) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, # corresponds to index R in formulas # now read the information about the shells: corr_shells = [ [ int(R.next()) for i in range(6) ] for icrsh in range(n_corr_shells) ] # reads iatom, sort, l, dim, SO flag, irep self.inequiv_shells(corr_shells) # determine the number of inequivalent correlated shells, has to be known for further reading... #corr_shells = numpy.array(corr_shells) use_rotations = 1 rot_mat = [numpy.identity(corr_shells[icrsh][3],numpy.complex_) for icrsh in xrange(n_corr_shells)] # read the matrices rot_mat_time_inv = [0 for i in range(n_corr_shells)] for icrsh in xrange(n_corr_shells): for i in xrange(corr_shells[icrsh][3]): # read real part: for j in xrange(corr_shells[icrsh][3]): rot_mat[icrsh][i,j] = R.next() for i in xrange(corr_shells[icrsh][3]): # read imaginary part: for j in xrange(corr_shells[icrsh][3]): rot_mat[icrsh][i,j] += 1j * R.next() if (SP==1): # read time inversion flag: rot_mat_time_inv[icrsh] = int(R.next()) # Read here the infos for the transformation of the basis: n_reps = [1 for i in range(self.n_inequiv_corr_shells)] dim_reps = [0 for i in range(self.n_inequiv_corr_shells)] T = [] for icrsh in range(self.n_inequiv_corr_shells): n_reps[icrsh] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg dim_reps[icrsh] = [int(R.next()) for i in range(n_reps[icrsh])] # dimensions of the subsets # The transformation matrix: # it is of dimension 2l+1, if no SO, and 2*(2l+1) with SO!! 
#T = [] #for ish in xrange(self.n_inequiv_corr_shells): ll = 2*corr_shells[self.invshellmap[icrsh]][2]+1 lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1) T.append(numpy.zeros([lmax,lmax],numpy.complex_)) # now read it from file: for i in xrange(lmax): for j in xrange(lmax): T[icrsh][i,j] = R.next() for i in xrange(lmax): for j in xrange(lmax): T[icrsh][i,j] += 1j * R.next() # Spin blocks to be read: n_spin_blocs = SP + 1 - SO # read the list of n_orbitals for all k points n_orbitals = numpy.zeros([n_k,n_spin_blocs],numpy.int) #n_orbitals = [ [0 for isp in range(n_spin_blocs)] for ik in xrange(n_k)] for isp in range(n_spin_blocs): for ik in xrange(n_k): #n_orbitals[ik][isp] = int(R.next()) n_orbitals[ik,isp] = int(R.next()) #print n_orbitals # Initialise the projectors: #proj_mat = [ [ [numpy.zeros([corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_) # for icrsh in range (n_corr_shells)] # for isp in range(n_spin_blocs)] # for ik in range(n_k) ] proj_mat = numpy.zeros([n_k,n_spin_blocs,n_corr_shells,max(numpy.array(corr_shells)[:,3]),max(n_orbitals)],numpy.complex_) # Read the projectors from the file: for ik in xrange(n_k): for icrsh in range(n_corr_shells): no = corr_shells[icrsh][3] # first Real part for BOTH spins, due to conventions in dmftproj: for isp in range(n_spin_blocs): for i in xrange(no): for j in xrange(n_orbitals[ik][isp]): #proj_mat[ik][isp][icrsh][i,j] = R.next() proj_mat[ik,isp,icrsh,i,j] = R.next() # now Imag part: for isp in range(n_spin_blocs): for i in xrange(no): for j in xrange(n_orbitals[ik][isp]): #proj_mat[ik][isp][icrsh][i,j] += 1j * R.next() proj_mat[ik,isp,icrsh,i,j] += 1j * R.next() # now define the arrays for weights and hopping ... bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k) # w(k_index), default normalisation #hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_) # for isp in range(n_spin_blocs)] for ik in xrange(n_k) ] hopping = numpy.zeros([n_k,n_spin_blocs,max(n_orbitals),max(n_orbitals)],numpy.complex_) # weights in the file for ik in xrange(n_k) : bz_weights[ik] = R.next() # if the sum over spins is in the weights, take it out again!! sm = sum(bz_weights) bz_weights[:] /= sm # Grab the H # we use now the convention of a DIAGONAL Hamiltonian!!!! for isp in range(n_spin_blocs): for ik in xrange(n_k) : no = n_orbitals[ik][isp] for i in xrange(no): #hopping[ik][isp][i,i] = R.next() * energy_unit hopping[ik,isp,i,i] = R.next() * energy_unit #keep some things that we need for reading parproj: self.n_shells = n_shells self.shells = shells self.n_corr_shells = n_corr_shells self.corr_shells = corr_shells self.n_spin_blocs = n_spin_blocs self.n_orbitals = n_orbitals self.n_k = n_k self.SO = SO self.SP = SP self.energy_unit = energy_unit except StopIteration : # a more explicit error if the file is corrupted. raise "SumkLDA : reading file HMLT_file failed!" R.close() #print proj_mat[0] #----------------------------------------- # Store the input into HDF5: ar = HDFArchive(self.hdf_file,'a') if not (self.lda_subgrp in ar): ar.create_group(self.lda_subgrp) # The subgroup containing the data. If it does not exist, it is created. # If it exists, the data is overwritten!!! 
ar[self.lda_subgrp]['energy_unit'] = energy_unit ar[self.lda_subgrp]['n_k'] = n_k ar[self.lda_subgrp]['k_dep_projection'] = k_dep_projection ar[self.lda_subgrp]['SP'] = SP ar[self.lda_subgrp]['SO'] = SO ar[self.lda_subgrp]['charge_below'] = charge_below ar[self.lda_subgrp]['density_required'] = density_required ar[self.lda_subgrp]['symm_op'] = symm_op ar[self.lda_subgrp]['n_shells'] = n_shells ar[self.lda_subgrp]['shells'] = shells ar[self.lda_subgrp]['n_corr_shells'] = n_corr_shells ar[self.lda_subgrp]['corr_shells'] = corr_shells ar[self.lda_subgrp]['use_rotations'] = use_rotations ar[self.lda_subgrp]['rot_mat'] = rot_mat ar[self.lda_subgrp]['rot_mat_time_inv'] = rot_mat_time_inv ar[self.lda_subgrp]['n_reps'] = n_reps ar[self.lda_subgrp]['dim_reps'] = dim_reps ar[self.lda_subgrp]['T'] = T ar[self.lda_subgrp]['n_orbitals'] = n_orbitals ar[self.lda_subgrp]['proj_mat'] = proj_mat ar[self.lda_subgrp]['bz_weights'] = bz_weights ar[self.lda_subgrp]['hopping'] = hopping del ar # Symmetries are used, # Now do the symmetries for correlated orbitals: self.read_symmetry_input(orbits=corr_shells,symm_file=self.symm_file,symm_subgrp=self.symm_subgrp,SO=SO,SP=SP)
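# Note on the convention used above: this converter reads only the band eigenvalues,
# i.e. H(k) is taken DIAGONAL in the band basis, hopping[ik, isp, i, i] =
# eps_i(k) * energy_unit, and no off-diagonal elements are ever read from file.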
def convert_parproj_input(self, par_proj_subgrp='SumK_LDA_ParProj', symm_par_subgrp='SymmPar'): """ Reads the input for the partial charges projectors from case.parproj, and stores it in the symm_par_subgrp group in the HDF5. """ if not (mpi.is_master_node()): return self.par_proj_subgrp = par_proj_subgrp self.symm_par_subgrp = symm_par_subgrp mpi.report("Reading parproj input from %s..." % self.parproj_file) dens_mat_below = [[ numpy.zeros([self.shells[ish][3], self.shells[ish][3]], numpy.complex_) for ish in range(self.n_shells) ] for isp in range(self.n_spin_blocs)] R = read_fortran_file(self.parproj_file) #try: n_parproj = [int(R.next()) for i in range(self.n_shells)] n_parproj = numpy.array(n_parproj) # Initialise P, here a double list of matrices: #proj_mat_pc = [ [ [ [numpy.zeros([self.shells[ish][3], self.n_orbitals[ik][isp]], numpy.complex_) # for ir in range(n_parproj[ish])] # for ish in range (self.n_shells) ] # for isp in range(self.n_spin_blocs) ] # for ik in range(self.n_k) ] proj_mat_pc = numpy.zeros([ self.n_k, self.n_spin_blocs, self.n_shells, max(n_parproj), max(numpy.array(self.shells)[:, 3]), max(self.n_orbitals) ], numpy.complex_) rot_mat_all = [ numpy.identity(self.shells[ish][3], numpy.complex_) for ish in xrange(self.n_shells) ] rot_mat_all_time_inv = [0 for i in range(self.n_shells)] for ish in range(self.n_shells): #print ish # read first the projectors for this orbital: for ik in xrange(self.n_k): for ir in range(n_parproj[ish]): for isp in range(self.n_spin_blocs): for i in xrange( self.shells[ish][3]): # read real part: for j in xrange(self.n_orbitals[ik][isp]): proj_mat_pc[ik, isp, ish, ir, i, j] = R.next() for isp in range(self.n_spin_blocs): for i in xrange( self.shells[ish][3]): # read imaginary part: for j in xrange(self.n_orbitals[ik][isp]): proj_mat_pc[ik, isp, ish, ir, i, j] += 1j * R.next() # now read the Density Matrix for this orbital below the energy window: for isp in range(self.n_spin_blocs): for i in xrange(self.shells[ish][3]): # read real part: for j in xrange(self.shells[ish][3]): dens_mat_below[isp][ish][i, j] = R.next() for isp in range(self.n_spin_blocs): for i in xrange(self.shells[ish][3]): # read imaginary part: for j in xrange(self.shells[ish][3]): dens_mat_below[isp][ish][i, j] += 1j * R.next() if (self.SP == 0): dens_mat_below[isp][ish] /= 2.0 # Global -> local rotation matrix for this shell: for i in xrange(self.shells[ish][3]): # read real part: for j in xrange(self.shells[ish][3]): rot_mat_all[ish][i, j] = R.next() for i in xrange(self.shells[ish][3]): # read imaginary part: for j in xrange(self.shells[ish][3]): rot_mat_all[ish][i, j] += 1j * R.next() #print Dens_Mat_below[0][ish],Dens_Mat_below[1][ish] if (self.SP): rot_mat_all_time_inv[ish] = int(R.next()) #except StopIteration : # a more explicit error if the file is corrupted. # raise "Wien2kConverter: reading file for Projectors failed!" R.close() #----------------------------------------- # Store the input into HDF5: ar = HDFArchive(self.hdf_file, 'a') if not (self.par_proj_subgrp in ar): ar.create_group(self.par_proj_subgrp) # The subgroup containing the data. If it does not exist, it is created. # If it exists, the data is overwritten!!! 
thingstowrite = [ 'dens_mat_below', 'n_parproj', 'proj_mat_pc', 'rot_mat_all', 'rot_mat_all_time_inv' ] for it in thingstowrite: exec "ar['%s']['%s'] = %s" % (self.par_proj_subgrp, it, it) del ar # Symmetries are used, # Now do the symmetries for all orbitals: self.read_symmetry_input(orbits=self.shells, symm_file=self.symmpar_file, symm_subgrp=self.symm_par_subgrp, SO=self.SO, SP=self.SP)
L = TBLattice(units = [(1, 0, 0), (0, 1, 0)], hopping = hop)
SL = TBSuperLattice(tb_lattice = L, super_lattice_units = [(2, 0), (0, 2)])

# SumK function that will perform the sum over the BZ
SK = SumkDiscreteFromLattice(lattice = SL, n_points = 8, method = "Riemann")

# Defines G and Sigma with a block structure compatible with the SumK function
G = BlockGf(name_block_generator = [(s, GfImFreq(indices = SK.GFBlocIndices, mesh = S.G.mesh)) for s in ['up', 'down']], make_copies = False)
Sigma = G.copy()

# Init Sigma
for n, B in S.Sigma:
    B <<= 2.0

S.symm_to_real(gf_in = S.Sigma, gf_out = Sigma)  # Embedding

# Computes sum over BZ and returns density
dens = (SK(mu = Chemical_potential, Sigma = Sigma, field = None, result = G).total_density() / 4)
mpi.report("Total density = %.3f" % dens)

S.real_to_symm(gf_in = G, gf_out = S.G)  # Extraction
S.G0 = inverse(S.Sigma + inverse(S.G))   # Finally get S.G0

# Solve the impurity problem
S.solve(n_cycles = 3000, n_warmup_cycles = 0, length_cycle = 10, n_legendre = 30)

# Open the results archive
if mpi.is_master_node():
    Results = HDFArchive("cdmft_4_sites.output.h5", 'w')
    Results["G"] = S.G
    Results["Gl"] = S.G_legendre
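# The extraction step above is the impurity Dyson equation, G0^{-1} = G^{-1} + Sigma,
# hence S.G0 = inverse(S.Sigma + inverse(S.G)); the embedding step before the BZ sum
# feeds the impurity self-energy into the lattice Green's function.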
def read_symmetry_input(self, orbits, symm_file, symm_subgrp, SO, SP):
    """
    Reads input for the symmetrisations from symm_file, which is case.sympar or case.symqmc.
    """

    if not (mpi.is_master_node()): return

    mpi.report("Reading symmetry input from %s..." % symm_file)

    n_orbits = len(orbits)
    R = read_fortran_file(symm_file)

    try:
        n_s = int(R.next())      # Number of symmetry operations
        n_atoms = int(R.next())  # number of atoms involved
        perm = [[int(R.next()) for i in xrange(n_atoms)] for j in xrange(n_s)]  # list of permutations of the atoms
        if SP:
            time_inv = [int(R.next()) for j in xrange(n_s)]  # time inversion for SO coupling
        else:
            time_inv = [0 for j in xrange(n_s)]

        # Now read matrices:
        mat = []
        for in_s in xrange(n_s):
            mat.append([numpy.zeros([orbits[orb][3], orbits[orb][3]], numpy.complex_) for orb in xrange(n_orbits)])
            for orb in range(n_orbits):
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat[in_s][orb][i, j] = R.next()        # real part
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat[in_s][orb][i, j] += 1j * R.next()  # imaginary part

        # determine the inequivalent shells:
        #SHOULD BE FINALLY REMOVED, PUT IT FOR ALL ORBITALS!!!!!
        #self.inequiv_shells(orbits)
        mat_tinv = [numpy.identity(orbits[orb][3], numpy.complex_) for orb in range(n_orbits)]

        if ((SO == 0) and (SP == 0)):
            # here we need an additional time inversion operation, so read it:
            for orb in range(n_orbits):
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat_tinv[orb][i, j] = R.next()        # real part
                for i in xrange(orbits[orb][3]):
                    for j in xrange(orbits[orb][3]):
                        mat_tinv[orb][i, j] += 1j * R.next()  # imaginary part

    except StopIteration:  # a more explicit error if the file is corrupted.
        raise IOError("Symmetry : reading file failed!")

    R.close()

    # Save it to the HDF:
    ar = HDFArchive(self.hdf_file, 'a')
    if not (symm_subgrp in ar): ar.create_group(symm_subgrp)
    thingstowrite = ['n_s', 'n_atoms', 'perm', 'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
    for it in thingstowrite:
        exec "ar['%s']['%s'] = %s" % (symm_subgrp, it, it)
    del ar
chemical_potential, dc_imp, dc_energ = SK.load(['chemical_potential', 'dc_imp', 'dc_energ'])
S.Sigma_iw << mpi.bcast(S.Sigma_iw)
chemical_potential = mpi.bcast(chemical_potential)
dc_imp = mpi.bcast(dc_imp)
dc_energ = mpi.bcast(dc_energ)
SK.set_mu(chemical_potential)
SK.set_dc(dc_imp, dc_energ)

for iteration_number in range(1, loops + 1):
    if mpi.is_master_node(): print "Iteration = ", iteration_number

    SK.symm_deg_gf(S.Sigma_iw, orb=0)                   # symmetrise Sigma
    SK.set_Sigma([S.Sigma_iw])                          # set Sigma into the SumK class
    chemical_potential = SK.calc_mu(precision=prec_mu)  # find the chemical potential for given density
    S.G_iw << SK.extract_G_loc()[0]                     # calc the local Green function
    mpi.report("Total charge of Gloc : %.6f" % S.G_iw.total_density())

    # Init the DC term and the real part of Sigma, if no previous runs found:
    if (iteration_number == 1 and previous_present == False):
        dm = S.G_iw.density()
        SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=dc_type)
        S.Sigma_iw << SK.dc_imp[0]['up'][0, 0]

    # Calculate new G0_iw to input into the solver:
    if mpi.is_master_node():
        # We can do a mixing of Delta in order to stabilize the DMFT iterations:
        S.G0_iw << S.Sigma_iw + inverse(S.G_iw)
        ar = HDFArchive(dft_filename + '.h5', 'a')
        if (iteration_number > 1 or previous_present):
            mpi.report("Mixing input Delta with factor %s" % delta_mix)
            Delta = (delta_mix * delta(S.G0_iw)) + (1.0 - delta_mix) * ar['dmft_output']['Delta_iw']
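# The mixing above is plain linear mixing, which damps oscillations between DMFT
# iterations: x_mixed = a * x_new + (1 - a) * x_old, with a = delta_mix in (0, 1]
# and a = 1 disabling the mixing. A hypothetical stand-alone form for any TRIQS
# Green's function:
def mix(x_new, x_old, a):
    return a * x_new + (1.0 - a) * x_old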
def convert_dmft_input(self): """ Reads the input files, and stores the data in the HDFfile """ if not (mpi.is_master_node()): return # do it only on master: mpi.report("Reading input from %s..." % self.lda_file) # Read and write only on Master!!! # R is a generator : each R.Next() will return the next number in the file R = read_fortran_file(self.lda_file) try: energy_unit = R.next() # read the energy convertion factor n_k = int(R.next()) # read the number of k points k_dep_projection = 1 SP = int(R.next()) # flag for spin-polarised calculation SO = int(R.next()) # flag for spin-orbit calculation charge_below = R.next() # total charge below energy window density_required = R.next( ) # total density required, for setting the chemical potential symm_op = 1 # Use symmetry groups for the k-sum # the information on the non-correlated shells is not important here, maybe skip: n_shells = int(R.next( )) # number of shells (e.g. Fe d, As p, O p) in the unit cell, # corresponds to index R in formulas shells = [[int(R.next()) for i in range(4)] for icrsh in range(n_shells) ] # reads iatom, sort, l, dim #shells = numpy.array(shells) n_corr_shells = int(R.next( )) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, # corresponds to index R in formulas # now read the information about the shells: corr_shells = [[int(R.next()) for i in range(6)] for icrsh in range(n_corr_shells) ] # reads iatom, sort, l, dim, SO flag, irep self.inequiv_shells( corr_shells ) # determine the number of inequivalent correlated shells, has to be known for further reading... #corr_shells = numpy.array(corr_shells) use_rotations = 1 rot_mat = [ numpy.identity(corr_shells[icrsh][3], numpy.complex_) for icrsh in xrange(n_corr_shells) ] # read the matrices rot_mat_time_inv = [0 for i in range(n_corr_shells)] for icrsh in xrange(n_corr_shells): for i in xrange(corr_shells[icrsh][3]): # read real part: for j in xrange(corr_shells[icrsh][3]): rot_mat[icrsh][i, j] = R.next() for i in xrange(corr_shells[icrsh][3]): # read imaginary part: for j in xrange(corr_shells[icrsh][3]): rot_mat[icrsh][i, j] += 1j * R.next() if (SP == 1): # read time inversion flag: rot_mat_time_inv[icrsh] = int(R.next()) # Read here the infos for the transformation of the basis: n_reps = [1 for i in range(self.n_inequiv_corr_shells)] dim_reps = [0 for i in range(self.n_inequiv_corr_shells)] T = [] for icrsh in range(self.n_inequiv_corr_shells): n_reps[icrsh] = int(R.next( )) # number of representatives ("subsets"), e.g. t2g and eg dim_reps[icrsh] = [ int(R.next()) for i in range(n_reps[icrsh]) ] # dimensions of the subsets # The transformation matrix: # it is of dimension 2l+1, if no SO, and 2*(2l+1) with SO!! 
#T = [] #for ish in xrange(self.n_inequiv_corr_shells): ll = 2 * corr_shells[self.invshellmap[icrsh]][2] + 1 lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1) T.append(numpy.zeros([lmax, lmax], numpy.complex_)) # now read it from file: for i in xrange(lmax): for j in xrange(lmax): T[icrsh][i, j] = R.next() for i in xrange(lmax): for j in xrange(lmax): T[icrsh][i, j] += 1j * R.next() # Spin blocks to be read: n_spin_blocs = SP + 1 - SO # read the list of n_orbitals for all k points n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int) #n_orbitals = [ [0 for isp in range(n_spin_blocs)] for ik in xrange(n_k)] for isp in range(n_spin_blocs): for ik in xrange(n_k): #n_orbitals[ik][isp] = int(R.next()) n_orbitals[ik, isp] = int(R.next()) #print n_orbitals # Initialise the projectors: #proj_mat = [ [ [numpy.zeros([corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_) # for icrsh in range (n_corr_shells)] # for isp in range(n_spin_blocs)] # for ik in range(n_k) ] proj_mat = numpy.zeros([ n_k, n_spin_blocs, n_corr_shells, max(numpy.array(corr_shells)[:, 3]), max(n_orbitals) ], numpy.complex_) # Read the projectors from the file: for ik in xrange(n_k): for icrsh in range(n_corr_shells): no = corr_shells[icrsh][3] # first Real part for BOTH spins, due to conventions in dmftproj: for isp in range(n_spin_blocs): for i in xrange(no): for j in xrange(n_orbitals[ik][isp]): #proj_mat[ik][isp][icrsh][i,j] = R.next() proj_mat[ik, isp, icrsh, i, j] = R.next() # now Imag part: for isp in range(n_spin_blocs): for i in xrange(no): for j in xrange(n_orbitals[ik][isp]): #proj_mat[ik][isp][icrsh][i,j] += 1j * R.next() proj_mat[ik, isp, icrsh, i, j] += 1j * R.next() # now define the arrays for weights and hopping ... bz_weights = numpy.ones([n_k], numpy.float_) / float( n_k) # w(k_index), default normalisation #hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_) # for isp in range(n_spin_blocs)] for ik in xrange(n_k) ] hopping = numpy.zeros( [n_k, n_spin_blocs, max(n_orbitals), max(n_orbitals)], numpy.complex_) # weights in the file for ik in xrange(n_k): bz_weights[ik] = R.next() # if the sum over spins is in the weights, take it out again!! sm = sum(bz_weights) bz_weights[:] /= sm # Grab the H # we use now the convention of a DIAGONAL Hamiltonian!!!! for isp in range(n_spin_blocs): for ik in xrange(n_k): no = n_orbitals[ik][isp] for i in xrange(no): #hopping[ik][isp][i,i] = R.next() * energy_unit hopping[ik, isp, i, i] = R.next() * energy_unit #keep some things that we need for reading parproj: self.n_shells = n_shells self.shells = shells self.n_corr_shells = n_corr_shells self.corr_shells = corr_shells self.n_spin_blocs = n_spin_blocs self.n_orbitals = n_orbitals self.n_k = n_k self.SO = SO self.SP = SP self.energy_unit = energy_unit except StopIteration: # a more explicit error if the file is corrupted. raise "SumkLDA : reading file HMLT_file failed!" R.close() #print proj_mat[0] #----------------------------------------- # Store the input into HDF5: ar = HDFArchive(self.hdf_file, 'a') if not (self.lda_subgrp in ar): ar.create_group(self.lda_subgrp) # The subgroup containing the data. If it does not exist, it is created. # If it exists, the data is overwritten!!! 
ar[self.lda_subgrp]['energy_unit'] = energy_unit ar[self.lda_subgrp]['n_k'] = n_k ar[self.lda_subgrp]['k_dep_projection'] = k_dep_projection ar[self.lda_subgrp]['SP'] = SP ar[self.lda_subgrp]['SO'] = SO ar[self.lda_subgrp]['charge_below'] = charge_below ar[self.lda_subgrp]['density_required'] = density_required ar[self.lda_subgrp]['symm_op'] = symm_op ar[self.lda_subgrp]['n_shells'] = n_shells ar[self.lda_subgrp]['shells'] = shells ar[self.lda_subgrp]['n_corr_shells'] = n_corr_shells ar[self.lda_subgrp]['corr_shells'] = corr_shells ar[self.lda_subgrp]['use_rotations'] = use_rotations ar[self.lda_subgrp]['rot_mat'] = rot_mat ar[self.lda_subgrp]['rot_mat_time_inv'] = rot_mat_time_inv ar[self.lda_subgrp]['n_reps'] = n_reps ar[self.lda_subgrp]['dim_reps'] = dim_reps ar[self.lda_subgrp]['T'] = T ar[self.lda_subgrp]['n_orbitals'] = n_orbitals ar[self.lda_subgrp]['proj_mat'] = proj_mat ar[self.lda_subgrp]['bz_weights'] = bz_weights ar[self.lda_subgrp]['hopping'] = hopping del ar # Symmetries are used, # Now do the symmetries for correlated orbitals: self.read_symmetry_input(orbits=corr_shells, symm_file=self.symm_file, symm_subgrp=self.symm_subgrp, SO=SO, SP=SP)
def solve(self, U_int, J_hund, T=None, verbosity=0, Iteration_Number=1, Test_Convergence=0.0001): """Calculation of the impurity Greens function using Hubbard-I""" if self.Converged : mpi.report("Solver %(name)s has already converged: SKIPPING"%self.__dict__) return if mpi.is_master_node(): self.verbosity = verbosity else: self.verbosity = 0 #self.Nmoments = 5 ur,ujmn,umn=self.__set_umatrix(U=U_int,J=J_hund,T=T) M = [x for x in self.G.mesh] self.zmsb = numpy.array([x for x in M],numpy.complex_) # # for the tails: # tailtempl={} # for sig,g in self.G: # tailtempl[sig] = copy.deepcopy(g.tail) # for i in range(9): tailtempl[sig][i] *= 0.0 # self.__save_eal('eal.dat',Iteration_Number) # mpi.report( "Starting Fortran solver %(name)s"%self.__dict__) self.Sigma_Old <<= self.Sigma self.G_Old <<= self.G # # call the fortran solver: # temp = 1.0/self.beta # gf,tail,self.atocc,self.atmag = gf_hi_fullu(e0f=self.ealmat, ur=ur, umn=umn, ujmn=ujmn, # zmsb=self.zmsb, nmom=self.Nmoments, ns=self.Nspin, temp=temp, verbosity = self.verbosity) #self.sig = sigma_atomic_fullu(gf=self.gf,e0f=self.eal,zmsb=self.zmsb,ns=self.Nspin,nlm=self.Nlm) def print_matrix(m): for row in m: print ''.join(map("{0:12.7f}".format, row)) # Hartree-Fock solver self.Sigma.zero() dm = self.G.density() if mpi.is_master_node(): # print # print " Reduced U-matrix:" # print " U:" # print_matrix(ujmn) # print " Up:" # print_matrix(umn) # ## sig_test = {bl: numpy.zeros((self.Nlm, self.Nlm)) for bl in dm} # sig_test = {} # sig_test['up'] = numpy.dot(umn, dm['up'].real) + numpy.dot(ujmn, dm['down'].real) # sig_test['down'] = numpy.dot(umn, dm['down'].real) + numpy.dot(ujmn, dm['up'].real) # print " Sigma test:" # print_matrix(sig_test['up']) print print " Density matrix (up):" print_matrix(dm['up']) print print " Density matrix (down):" print_matrix(dm['down']) if self.dudarev: Ueff = U_int - J_hund corr_energy = 0.0 dft_dc = 0.0 for bl1 in dm: # (U - J) * (1/2 - n) self.Sigma[bl1] << Ueff * (0.5 * numpy.identity(self.Nlm) - dm[bl1]) # 1/2 (U - J) * \sum_{\sig} [\sum_{m} n_{m,m \sig} - \sum_{m1,m2} n_{m1,m2 \sig} n_{m2,m1 \sig}] corr_energy += 0.5 * Ueff * (dm[bl1].trace() - (dm[bl1]*dm[bl1].conj()).sum()).real # V[n] * n^{\dagger} dft_dc += (self.Sigma[bl1](0) * dm[bl1].conj()).sum().real else: # !!!!! # !!!!! Mind the order of indices in the 4-index matrix! # !!!!! for il1 in xrange(self.Nlm): for il2 in xrange(self.Nlm): for il3 in xrange(self.Nlm): for il4 in xrange(self.Nlm): for bl1 in dm: for bl2 in dm: self.Sigma[bl1][il1, il2] += ur[il1, il3, il2, il4] * dm[bl2][il3, il4] if bl1 == bl2: self.Sigma[bl1][il1, il2] -= ur[il1, il3, il4, il2] * dm[bl1][il3, il4] if mpi.is_master_node() and self.verbosity > 0: print print " Sigma (up):" print_matrix(self.Sigma['up'](0).real) print print " Sigma (down):" print_matrix(self.Sigma['down'](0).real) # if (self.verbosity==0): # # No fortran output, so give basic results here # mpi.report("Atomic occupancy in Hubbard I Solver : %s"%self.atocc) # mpi.report("Atomic magn. mom. in Hubbard I Solver : %s"%self.atmag) # transfer the data to the GF class: if (self.UseSpinOrbit): nlmtot = self.Nlm*2 # only one block in this case! 
else: nlmtot = self.Nlm # M={} # isp=-1 # for a,al in self.gf_struct: # isp+=1 # M[a] = numpy.array(gf[isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot,:]).transpose(2,0,1).copy() # for i in range(min(self.Nmoments,8)): # tailtempl[a][i+1] = tail[i][isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot] # # #glist = lambda : [ GfImFreq(indices = al, beta = self.beta, n_points = self.Nmsb, data =M[a], tail =self.tailtempl[a]) # # for a,al in self.gf_struct] # glist = lambda : [ GfImFreq(indices = al, beta = self.beta, n_points = self.Nmsb) for a,al in self.gf_struct] # self.G = BlockGf(name_list = self.a_list, block_list = glist(),make_copies=False) # # self.__copy_Gf(self.G,M,tailtempl) # # # Self energy: # self.G0 <<= iOmega_n # # M = [ self.ealmat[isp*nlmtot:(isp+1)*nlmtot,isp*nlmtot:(isp+1)*nlmtot] for isp in range((2*self.Nlm)/nlmtot) ] # self.G0 -= M # self.Sigma <<= self.G0 - inverse(self.G) # # # invert G0 # self.G0.invert() def test_distance(G1,G2, dist) : def f(G1,G2) : #print abs(G1.data - G2.data) dS = max(abs(G1.data - G2.data).flatten()) aS = max(abs(G1.data).flatten()) if mpi.is_master_node(): print " Distances:", dS, " vs ", aS * dist return dS <= aS*dist return reduce(lambda x,y : x and y, [f(g1,g2) for (i1,g1),(i2,g2) in izip(G1,G2)]) mpi.report("\nChecking Sigma for convergence...\nUsing tolerance %s"%Test_Convergence) self.Converged = test_distance(self.Sigma,self.Sigma_Old,Test_Convergence) if self.Converged : mpi.report("Solver HAS CONVERGED") else : mpi.report("Solver has not yet converged") return corr_energy, dft_dc
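# Numerical check of the Dudarev branch above (made-up occupations): a single
# orbital with n = 0.6 per spin and Ueff = U - J = 1.3 gives
# Sigma = Ueff*(1/2 - n) = -0.13 per spin and corr_energy = 2 * 0.5*1.3*(0.6 - 0.36) = 0.312.
import numpy
Ueff = 1.3
dm = {bl: numpy.array([[0.6]]) for bl in ('up', 'down')}
sigma = dict((bl, Ueff * (0.5 * numpy.identity(1) - dm[bl])) for bl in dm)
corr_energy = sum(0.5 * Ueff * (d.trace() - (d * d.conj()).sum()).real
                  for d in dm.values())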
def five_plus_five(use_interaction=True): results_file_name = "5_plus_5." + ("int." if use_interaction else "") + "h5" # Block structure of GF L = 2 # d-orbital spin_names = ("up", "dn") orb_names = cubic_names(L) # Input parameters beta = 40. mu = 26 U = 4.0 J = 0.7 F0 = U F2 = J * (14.0 / (1.0 + 0.63)) F4 = F2 * 0.63 # Dump the local Hamiltonian to a text file (set to None to disable dumping) H_dump = "H.txt" # Dump Delta parameters to a text file (set to None to disable dumping) Delta_dump = "Delta_params.txt" # Hybridization function parameters # Delta(\tau) is diagonal in the basis of cubic harmonics # Each component of Delta(\tau) is represented as a list of single-particle # terms parametrized by pairs (V_k,\epsilon_k). delta_params = { "xy": { 'V': 0.2, 'e': -0.2 }, "yz": { 'V': 0.2, 'e': -0.15 }, "z^2": { 'V': 0.2, 'e': -0.1 }, "xz": { 'V': 0.2, 'e': 0.05 }, "x^2-y^2": { 'V': 0.2, 'e': 0.4 } } atomic_levels = { ('up_xy', 0): -0.2, ('dn_xy', 0): -0.2, ('up_yz', 0): -0.15, ('dn_yz', 0): -0.15, ('up_z^2', 0): -0.1, ('dn_z^2', 0): -0.1, ('up_xz', 0): 0.05, ('dn_xz', 0): 0.05, ('up_x^2-y^2', 0): 0.4, ('dn_x^2-y^2', 0): 0.4 } n_iw = 1025 n_tau = 10001 p = {} p["max_time"] = -1 p["random_name"] = "" p["random_seed"] = 123 * mpi.rank + 567 p["length_cycle"] = 50 #p["n_warmup_cycles"] = 5000 p["n_warmup_cycles"] = 500 p["n_cycles"] = int(1.e1 / mpi.size) #p["n_cycles"] = int(5.e5 / mpi.size) #p["n_cycles"] = int(5.e6 / mpi.size) p["partition_method"] = "autopartition" p["measure_G_tau"] = True p["move_shift"] = True p["move_double"] = True p["measure_pert_order"] = False p["performance_analysis"] = False p["use_trace_estimator"] = False mpi.report("Welcome to 5+5 (5 orbitals + 5 bath sites) test.") gf_struct = set_operator_structure(spin_names, orb_names, False) mkind = get_mkind(False, None) H = Operator() if use_interaction: # Local Hamiltonian U_mat = U_matrix(L, [F0, F2, F4], basis='cubic') H += h_int_slater(spin_names, orb_names, U_mat, False, H_dump=H_dump) else: mu = 0. 
p["h_int"] = H # Quantum numbers (N_up and N_down) QN = [Operator(), Operator()] for cn in orb_names: for i, sn in enumerate(spin_names): QN[i] += n(*mkind(sn, cn)) if p["partition_method"] == "quantum_numbers": p["quantum_numbers"] = QN mpi.report("Constructing the solver...") # Construct the solver S = SolverCore(beta=beta, gf_struct=gf_struct, n_tau=n_tau, n_iw=n_iw) mpi.report("Preparing the hybridization function...") H_hyb = Operator() # Set hybridization function if Delta_dump: Delta_dump_file = open(Delta_dump, 'w') for sn, cn in product(spin_names, orb_names): bn, i = mkind(sn, cn) V = delta_params[cn]['V'] e = delta_params[cn]['e'] delta_w = Gf(mesh=MeshImFreq(beta, 'Fermion', n_iw), target_shape=[]) delta_w << (V**2) * inverse(iOmega_n - e) S.G0_iw[bn][i, i] << inverse(iOmega_n + mu - atomic_levels[(bn, i)] - delta_w) cnb = cn + '_b' # bath level a = sn + '_' + cn b = sn + '_' + cn + '_b' H_hyb += ( atomic_levels[(bn,i)] - mu ) * n(a, 0) + \ n(b,0) * e + V * ( c(a,0) * c_dag(b,0) + c(b,0) * c_dag(a,0) ) # Dump Delta parameters if Delta_dump: Delta_dump_file.write(bn + '\t') Delta_dump_file.write(str(V) + '\t') Delta_dump_file.write(str(e) + '\n') if mpi.is_master_node(): filename_ham = 'data_Ham%s.h5' % ('_int' if use_interaction else '') with HDFArchive(filename_ham, 'w') as arch: arch['H'] = H_hyb + H arch['gf_struct'] = gf_struct arch['beta'] = beta mpi.report("Running the simulation...") # Solve the problem S.solve(**p) # Save the results if mpi.is_master_node(): Results = HDFArchive(results_file_name, 'w') Results['G_tau'] = S.G_tau Results['G0_iw'] = S.G0_iw Results['use_interaction'] = use_interaction Results['delta_params'] = delta_params Results['spin_names'] = spin_names Results['orb_names'] = orb_names import __main__ Results.create_group("log") log = Results["log"] log["version"] = version.version log["triqs_hash"] = version.triqs_hash log["cthyb_hash"] = version.cthyb_hash log["script"] = inspect.getsource(__main__)
n_iw = 1025 n_tau = 10001 p = {} p["max_time"] = -1 p["random_name"] = "" p["random_seed"] = 123 * mpi.rank + 567 p["length_cycle"] = 50 p["n_warmup_cycles"] = 20000 p["n_cycles"] = 1000000 results_file_name = "spinless" if use_qn: results_file_name += ".qn" results_file_name += ".h5" mpi.report( "Welcome to spinless (spinless electrons on a correlated dimer) test.") H = U * n("tot", "A") * n("tot", "B") QN = [] if use_qn: QN.append(n("tot", "A") + n("tot", "B")) p["partition_method"] = "quantum_numbers" p["quantum_numbers"] = QN gf_struct = {"tot": ["A", "B"]} mpi.report("Constructing the solver...") ## Construct the solver S = SolverCore(beta=beta, gf_struct=gf_struct, n_iw=n_iw, n_tau=n_tau)
def get_chi0_wnk(g_wk, nw=1, nwf=None): r""" Compute the generalized lattice bubble susceptibility :math:`\chi^{(0)}_{abcd}(\omega, \nu, \mathbf{k})` from the single-particle Green's function :math:`G_{ab}(\omega, \mathbf{k})`. Parameters ---------- g_wk : Single-particle Green's function :math:`G_{ab}(\omega, \mathbf{k})`. nw : Number of bosonic freqiencies in :math:`\chi`. nwf : Number of fermionic freqiencies in :math:`\chi`. Returns ------- chi0_wnk : Generalized lattice bubble susceptibility :math:`\chi^{(0)}_{abcd}(\omega, \nu, \mathbf{k})` """ fmesh = g_wk.mesh.components[0] kmesh = g_wk.mesh.components[1] if nwf is None: nwf = len(fmesh) / 2 mpi.barrier() mpi.report('g_wk ' + str(g_wk[Idx(2), Idx(0, 0, 0)][0, 0])) n = np.sum(g_wk.data) / len(kmesh) mpi.report('n ' + str(n)) mpi.barrier() mpi.report('--> g_wr from g_wk') g_wr = fourier_wk_to_wr(g_wk) mpi.barrier() mpi.report('g_wr ' + str(g_wr[Idx(2), Idx(0, 0, 0)][0, 0])) n_r = np.sum(g_wr.data, axis=0)[0] mpi.report('n_r=0 ' + str(n_r[0, 0])) mpi.barrier() mpi.report('--> chi0_wnr from g_wr') chi0_wnr = chi0r_from_gr_PH(nw=nw, nn=nwf, g_nr=g_wr) #mpi.report('--> chi0_wnr from g_wr (nompi)') #chi0_wnr_nompi = chi0r_from_gr_PH_nompi(nw=nw, nn=nwf, g_wr=g_wr) del g_wr #abs_diff = np.abs(chi0_wnr.data - chi0_wnr_nompi.data) #mpi.report('shape = ' + str(abs_diff.shape)) #idx = np.argmax(abs_diff) #mpi.report('argmax = ' + str(idx)) #diff = np.max(abs_diff) #mpi.report('diff = %6.6f' % diff) #del chi0_wnr #chi0_wnr = chi0_wnr_nompi #exit() mpi.barrier() mpi.report('chi0_wnr ' + str(chi0_wnr[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0])) chi0_r0 = np.sum(chi0_wnr[:, :, Idx(0, 0, 0)].data) mpi.report('chi0_r0 ' + str(chi0_r0)) mpi.barrier() mpi.report('--> chi0_wnk from chi0_wnr') chi0_wnk = chi0q_from_chi0r(chi0_wnr) del chi0_wnr mpi.barrier() mpi.report('chi0_wnk ' + str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0])) chi0 = np.sum(chi0_wnk.data) / len(kmesh) mpi.report('chi0 = ' + str(chi0)) mpi.barrier() #if mpi.is_master_node(): if False: from triqs_tprf.ParameterCollection import ParameterCollection p = ParameterCollection() p.g_wk = g_wk p.g_wr = g_wr p.chi0_wnr = chi0_wnr p.chi0_wnk = chi0_wnk print '--> Writing debug info for BSE' with HDFArchive('data_debug_bse.h5', 'w') as arch: arch['p'] = p mpi.barrier() return chi0_wnk
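# Hypothetical usage sketch (e_k and wmesh are assumed to exist; the
# lattice_dyson_g0_wk call is the usual tprf entry point, but check your
# version's API): build a lattice Green's function from a dispersion and form
# the particle-hole bubble with one bosonic and 20 fermionic frequencies.
from triqs_tprf.lattice import lattice_dyson_g0_wk
g0_wk = lattice_dyson_g0_wk(mu=0.0, e_k=e_k, mesh=wmesh)
chi0_wnk = get_chi0_wnk(g0_wk, nw=1, nwf=20)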
#!/bin/env pytriqs import pytriqs.utility.mpi as mpi from pytriqs.gf import GfImFreq, iOmega_n, inverse from pytriqs.operators import n from pytriqs.archive import HDFArchive from triqs_cthyb import SolverCore mpi.report( "Welcome to asymm_bath test (1 band with a small asymmetric hybridization function)." ) mpi.report("This test helps to detect sampling problems.") # H_loc parameters beta = 40.0 ed = -1.0 U = 2.0 epsilon = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5] V = 0.2 # Parameters n_iw = 1025 n_tau = 10001 p = {} p["max_time"] = -1 p["random_name"] = "" p["random_seed"] = 123 * mpi.rank + 567 p["length_cycle"] = 50 p["n_warmup_cycles"] = 20000
def anderson(use_qn=True, use_blocks=True): spin_names = ("up","dn") mkind = lambda spin: (spin,0) if use_blocks else ("tot",spin) # Input parameters beta = 10.0 U = 2.0 mu = 1.0 h = 0.1 V = 0.5 epsilon = 2.3 n_iw = 1025 n_tau = 10001 p = {} p["max_time"] = -1 p["random_name"] = "" p["random_seed"] = 123 * mpi.rank + 567 p["length_cycle"] = 50 p["n_warmup_cycles"] = 50000 p["n_cycles"] = 5000000 p["measure_density_matrix"] = True p["use_norm_as_weight"] = True results_file_name = "anderson" if use_blocks: results_file_name += ".block" if use_qn: results_file_name += ".qn" results_file_name += ".h5" mpi.report("Welcome to Anderson (1 correlated site + symmetric bath) test.") H = U*n(*mkind("up"))*n(*mkind("dn")) QN = [] if use_qn: for spin in spin_names: QN.append(n(*mkind(spin))) p["quantum_numbers"] = QN p["partition_method"] = "quantum_numbers" gf_struct = {} for spin in spin_names: bn, i = mkind(spin) gf_struct.setdefault(bn,[]).append(i) gf_struct = [ [key, value] for key, value in gf_struct.items() ] # convert from dict to list of lists mpi.report("Constructing the solver...") # Construct the solver S = SolverCore(beta=beta, gf_struct=gf_struct, n_tau=n_tau, n_iw=n_iw) mpi.report("Preparing the hybridization function...") # Set hybridization function delta_w = GfImFreq(indices = [0], beta=beta) delta_w << (V**2) * inverse(iOmega_n - epsilon) + (V**2) * inverse(iOmega_n + epsilon) for spin in spin_names: bn, i = mkind(spin) S.G0_iw[bn][i,i] << inverse(iOmega_n + mu - {'up':h,'dn':-h}[spin] - delta_w) mpi.report("Running the simulation...") # Solve the problem S.solve(h_int=H, **p) # Save the results if mpi.is_master_node(): static_observables = {'Nup' : n(*mkind("up")), 'Ndn' : n(*mkind("dn")), 'unity' : Operator(1.0)} dm = S.density_matrix for oname in static_observables.keys(): print oname, trace_rho_op(dm,static_observables[oname],S.h_loc_diagonalization) with HDFArchive(results_file_name,'w') as Results: Results['G_tau'] = S.G_tau
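# Note: with mu = U/2 (here 1.0 = 2.0/2) and the symmetric two-pole bath
# Delta(i w_n) = V^2/(i w_n - epsilon) + V^2/(i w_n + epsilon), the model sits at
# particle-hole symmetry up to the field h, which only splits the up/dn occupancies.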
#!/bin/env pytriqs import pytriqs.utility.mpi as mpi from pytriqs.gf.local import * from pytriqs.operators import * from pytriqs.archive import HDFArchive from pytriqs.applications.impurity_solvers.cthyb import * mpi.report("Welcome to asymm_bath test (1 band with a small asymmetric hybridization function).") mpi.report("This test helps to detect sampling problems.") # H_loc parameters beta = 40.0 ed = -1.0 U = 2.0 epsilon = [0.0,0.1,0.2,0.3,0.4,0.5] V = 0.2 # Parameters n_iw = 1025 n_tau = 10001 p = {} p["max_time"] = -1 p["random_name"] = "" p["random_seed"] = 123 * mpi.rank + 567 p["length_cycle"] = 50 p["n_warmup_cycles"] = 20000 p["n_cycles"] = 1000000 p["performance_analysis"] = True
for use_qn in (True, False): n_iw = 1025 n_tau = 10001 p = {} p["max_time"] = -1 p["random_name"] = "" p["random_seed"] = 123 * mpi.rank + 567 p["length_cycle"] = 50 p["n_warmup_cycles"] = 50000 p["n_cycles"] = 3000000 results_file_name = "kanamori" + (".qn" if use_qn else "") + ".h5" mpi.report("Welcome to Kanamori benchmark.") gf_struct = set_operator_structure(spin_names,orb_names,False) mkind = get_mkind(False,None) ## Hamiltonian H = h_int_kanamori(spin_names,orb_names, np.array([[0,U-3*J],[U-3*J,0]]), np.array([[U,U-2*J],[U-2*J,U]]), J,False) if use_qn: QN = [sum([n(*mkind("up",o)) for o in orb_names],Operator()), sum([n(*mkind("dn",o)) for o in orb_names],Operator())] for o in orb_names: dn = n(*mkind("up",o)) - n(*mkind("dn",o))
def set_U_matrix(U_interact,J_hund,n_orb,l,use_matrix=True,T=None,sl_int=None,use_spinflip=False,dim_reps=None,irep=None): """ Set up the interaction vertex""" offset = 0 U4ind = None U = None Up = None if (use_matrix): if not (sl_int is None): Umat = Umatrix(l=l) assert len(sl_int)==(l+1),"sl_int has the wrong length" if (type(sl_int)==ListType): Rcl = numpy.array(sl_int) else: Rcl = sl_int Umat(T=T,Rcl=Rcl) else: if ((U_interact==None)and(J_hund==None)): mpi.report("Give U,J or Slater integrals!!!") assert 0 Umat = Umatrix(U_interact=U_interact, J_hund=J_hund, l=l) Umat(T=T) Umat.reduce_matrix() if (Umat.N==Umat.Nmat): # Transformation T is of size 2l+1 U = Umat.U Up = Umat.Up else: # Transformation is of size 2(2l+1) U = Umat.U # now we have the reduced matrices U and Up, we need it for tail fitting anyways if (use_spinflip): #Take the 4index Umatrix # check for imaginary matrix elements: if (abs(Umat.Ufull.imag)>0.0001).any(): mpi.report("WARNING: complex interaction matrix!! Ignoring imaginary part for the moment!") mpi.report("If you want to change this, look into Wien2k/solver_multiband.py") U4ind = Umat.Ufull.real # this will be changed for arbitrary irep: # use only one subgroup of orbitals? if not (irep is None): #print irep, dim_reps assert not (dim_reps is None), "Dimensions of the representatives are missing!" assert n_orb==dim_reps[irep-1],"Dimensions of dimrep and n_orb do not fit!" for ii in range(irep-1): offset += dim_reps[ii] else: if ((U_interact==None)and(J_hund==None)): mpi.report("For Kanamori representation, give U and J!!") assert 0 U = numpy.zeros([n_orb,n_orb],numpy.float_) Up = numpy.zeros([n_orb,n_orb],numpy.float_) for i in range(n_orb): for j in range(n_orb): if (i==j): Up[i,i] = U_interact else: Up[i,j] = U_interact - 2.0*J_hund U[i,j] = U_interact - 3.0*J_hund return U, Up, U4ind, offset
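# Sketch of the Kanamori matrices built in the else-branch above, generalised to
# any orbital count: Up carries U on the diagonal and U - 2J off it, while the
# same-spin matrix U carries U - 3J off the diagonal (and 0 on it).
import numpy

def kanamori_matrices(U_interact, J_hund, n_orb):
    U = (U_interact - 3.0 * J_hund) * (numpy.ones((n_orb, n_orb)) - numpy.identity(n_orb))
    Up = (U_interact - 2.0 * J_hund) * numpy.ones((n_orb, n_orb)) + 2.0 * J_hund * numpy.identity(n_orb)
    return U, Up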
def __init__(self, hdf_file, mu = 0.0, h_field = 0.0, use_lda_blocks = False, lda_data = 'SumK_LDA', symm_corr_data = 'SymmCorr', par_proj_data = 'SumK_LDA_ParProj', symm_par_data = 'SymmPar', bands_data = 'SumK_LDA_Bands'): """ Initialises the class from data previously stored into an HDF5 """ if not (type(hdf_file)==StringType): mpi.report("Give a string for the HDF5 filename to read the input!") else: self.hdf_file = hdf_file self.lda_data = lda_data self.par_proj_data = par_proj_data self.bands_data = bands_data self.symm_par_data = symm_par_data self.symm_corr_data = symm_corr_data self.block_names = [ ['up','down'], ['ud'] ] self.n_spin_blocks_gf = [2,1] self.Gupf = None self.h_field = h_field # read input from HDF: things_to_read = ['energy_unit','n_k','k_dep_projection','SP','SO','charge_below','density_required', 'symm_op','n_shells','shells','n_corr_shells','corr_shells','use_rotations','rot_mat', 'rot_mat_time_inv','n_reps','dim_reps','T','n_orbitals','proj_mat','bz_weights','hopping'] optional_things = ['gf_struct_solver','map_inv','map','chemical_potential','dc_imp','dc_energ','deg_shells'] #ar=HDFArchive(self.hdf_file,'a') #del ar self.retval = self.read_input_from_hdf(subgrp=self.lda_data,things_to_read=things_to_read,optional_things=optional_things) #ar=HDFArchive(self.hdf_file,'a') #del ar if (self.SO) and (abs(self.h_field)>0.000001): self.h_field=0.0 mpi.report("For SO, the external magnetic field is not implemented, setting it to 0!!") self.inequiv_shells(self.corr_shells) # determine the number of inequivalent correlated shells # field to convert block_names to indices self.names_to_ind = [{}, {}] for ibl in range(2): for inm in range(self.n_spin_blocks_gf[ibl]): self.names_to_ind[ibl][self.block_names[ibl][inm]] = inm * self.SP #(self.Nspinblocs-1) # GF structure used for the local things in the k sums self.gf_struct_corr = [ [ (al, range( self.corr_shells[i][3])) for al in self.block_names[self.corr_shells[i][4]] ] for i in xrange(self.n_corr_shells) ] if not (self.retval['gf_struct_solver']): # No gf_struct was stored in HDF, so first set a standard one: self.gf_struct_solver = [ [ (al, range( self.corr_shells[self.invshellmap[i]][3]) ) for al in self.block_names[self.corr_shells[self.invshellmap[i]][4]] ] for i in xrange(self.n_inequiv_corr_shells) ] self.map = [ {} for i in xrange(self.n_inequiv_corr_shells) ] self.map_inv = [ {} for i in xrange(self.n_inequiv_corr_shells) ] for i in xrange(self.n_inequiv_corr_shells): for al in self.block_names[self.corr_shells[self.invshellmap[i]][4]]: self.map[i][al] = [al for j in range( self.corr_shells[self.invshellmap[i]][3] ) ] self.map_inv[i][al] = al if not (self.retval['dc_imp']): # init the double counting: self.__init_dc() if not (self.retval['chemical_potential']): self.chemical_potential = mu if not (self.retval['deg_shells']): self.deg_shells = [ [] for i in range(self.n_inequiv_corr_shells)] if self.symm_op: #mpi.report("Do the init for symm:") self.Symm_corr = Symmetry(hdf_file,subgroup=self.symm_corr_data) # determine the smallest blocs, if wanted: if (use_lda_blocks): dm=self.analyse_BS() # now save things again to HDF5: if (mpi.is_master_node()): ar=HDFArchive(self.hdf_file,'a') ar[self.lda_data]['h_field'] = self.h_field del ar self.save()
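# Example of the mapping built above: for a spin-polarised run without SO
# (SP = 1, SO = 0), names_to_ind becomes [{'up': 0, 'down': 1}, {'ud': 0}];
# for SP = 0 both 'up' and 'down' map to index 0, so a single spin block is read.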
def analyse_BS(self, threshold = 0.00001, include_shells = None, dm = None): """ Determines the Greens function block structure from the simple point integration""" if (dm==None): dm = self.simple_point_dens_mat() dens_mat = [dm[self.invshellmap[ish]] for ish in xrange(self.n_inequiv_corr_shells) ] if include_shells is None: include_shells=range(self.n_inequiv_corr_shells) for ish in include_shells: #self.gf_struct_solver.append([]) self.gf_struct_solver[ish] = [] gf_struct_temp = [] a_list = [a for a,al in self.gf_struct_corr[self.invshellmap[ish]] ] for a in a_list: dm = dens_mat[ish][a] dmbool = (abs(dm) > threshold) # gives an index list of entries larger that threshold offdiag = [] for i in xrange(len(dmbool)): for j in xrange(i,len(dmbool)): if ((dmbool[i,j])&(i!=j)): offdiag.append([i,j]) NBlocs = len(dmbool) blocs = [ [i] for i in range(NBlocs) ] for i in range(len(offdiag)): if (offdiag[i][0]!=offdiag[i][1]): for j in range(len(blocs[offdiag[i][1]])): blocs[offdiag[i][0]].append(blocs[offdiag[i][1]][j]) del blocs[offdiag[i][1]] for j in range(i+1,len(offdiag)): if (offdiag[j][0]==offdiag[i][1]): offdiag[j][0]=offdiag[i][0] if (offdiag[j][1]==offdiag[i][1]): offdiag[j][1]=offdiag[i][0] if (offdiag[j][0]>offdiag[i][1]): offdiag[j][0] -= 1 if (offdiag[j][1]>offdiag[i][1]): offdiag[j][1] -= 1 offdiag[j].sort() NBlocs-=1 for i in range(NBlocs): blocs[i].sort() self.gf_struct_solver[ish].append( ('%s%s'%(a,i),range(len(blocs[i]))) ) gf_struct_temp.append( ('%s%s'%(a,i),blocs[i]) ) # map is the mapping of the blocs from the SK blocs to the CTQMC blocs: self.map[ish][a] = range(len(dmbool)) for ibl in range(NBlocs): for j in range(len(blocs[ibl])): self.map[ish][a][blocs[ibl][j]] = '%s%s'%(a,ibl) self.map_inv[ish]['%s%s'%(a,ibl)] = a # now calculate degeneracies of orbitals: dm = {} for bl in gf_struct_temp: bln = bl[0] ind = bl[1] # get dm for the blocks: dm[bln] = numpy.zeros([len(ind),len(ind)],numpy.complex_) for i in range(len(ind)): for j in range(len(ind)): dm[bln][i,j] = dens_mat[ish][self.map_inv[ish][bln]][ind[i],ind[j]] for bl in gf_struct_temp: for bl2 in gf_struct_temp: if (dm[bl[0]].shape==dm[bl2[0]].shape) : if ( ( (abs(dm[bl[0]]-dm[bl2[0]])<threshold).all() ) and (bl[0]!=bl2[0]) ): # check if it was already there: ind1=-1 ind2=-2 for n,ind in enumerate(self.deg_shells[ish]): if (bl[0] in ind): ind1=n if (bl2[0] in ind): ind2=n if ((ind1<0)and(ind2>=0)): self.deg_shells[ish][ind2].append(bl[0]) elif ((ind1>=0)and(ind2<0)): self.deg_shells[ish][ind1].append(bl2[0]) elif ((ind1<0)and(ind2<0)): self.deg_shells[ish].append([bl[0],bl2[0]]) if (mpi.is_master_node()): ar=HDFArchive(self.hdf_file,'a') ar[self.lda_data]['gf_struct_solver'] = self.gf_struct_solver ar[self.lda_data]['map'] = self.map ar[self.lda_data]['map_inv'] = self.map_inv try: ar[self.lda_data]['deg_shells'] = self.deg_shells except: mpi.report("deg_shells not stored, degeneracies not found") del ar return dens_mat
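# The block detection above is a connected-components problem: orbitals i and j
# belong to the same block whenever |dm[i, j]| > threshold. A compact equivalent
# sketch (not the code used above, which merges blocks in place):
import numpy

def find_blocks(dm, threshold=1e-5):
    n = len(dm)
    adj = abs(numpy.asarray(dm)) > threshold
    blocks, seen = [], set()
    for start in range(n):
        if start in seen:
            continue
        stack, comp = [start], []
        while stack:
            i = stack.pop()
            if i in seen:
                continue
            seen.add(i)
            comp.append(i)
            stack.extend(j for j in range(n) if adj[i, j] and j not in seen)
        blocks.append(sorted(comp))
    return blocks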
def solve_lattice_bse(g_wk, gamma_wnn, tail_corr_nwf=None):

    fmesh_g = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    bmesh = gamma_wnn.mesh.components[0]
    fmesh = gamma_wnn.mesh.components[1]

    nk = len(kmesh)
    nw = (len(bmesh) + 1) / 2
    nwf = len(fmesh) / 2
    nwf_g = len(fmesh_g) / 2

    if mpi.is_master_node():
        print tprf_banner(), "\n"
        print 'Lattice BSE with local vertex approximation.\n'
        print 'nk    =', nk
        print 'nw    =', nw
        print 'nwf   =', nwf
        print 'nwf_g =', nwf_g
        print 'tail_corr_nwf =', tail_corr_nwf
        print

    if tail_corr_nwf is None:
        tail_corr_nwf = nwf

    mpi.report('--> chi0_wnk_tail_corr')
    chi0_wnk_tail_corr = get_chi0_wnk(g_wk, nw=nw, nwf=tail_corr_nwf)

    mpi.report('--> trace chi0_wnk_tail_corr (WARNING! NO TAIL FIT. FIXME!)')
    chi0_wk_tail_corr = chi0q_sum_nu_tail_corr_PH(chi0_wnk_tail_corr)

    mpi.barrier()
    mpi.report('B1 ' + str(chi0_wk_tail_corr[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    mpi.report('--> chi0_wnk_tail_corr to chi0_wnk')
    if tail_corr_nwf != nwf:
        mpi.report('--> fixed_fermionic_window_python_wnk')
        chi0_wnk = fixed_fermionic_window_python_wnk(chi0_wnk_tail_corr, nwf=nwf)
    else:
        chi0_wnk = chi0_wnk_tail_corr.copy()

    del chi0_wnk_tail_corr

    mpi.barrier()
    mpi.report('C ' + str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    mpi.report('--> trace chi0_wnk')
    chi0_wk = chi0q_sum_nu(chi0_wnk)

    mpi.barrier()
    mpi.report('D ' + str(chi0_wk[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    dchi_wk = chi0_wk_tail_corr - chi0_wk

    chi0_kw = Gf(mesh=MeshProduct(kmesh, bmesh),
                 target_shape=chi0_wk_tail_corr.target_shape)
    chi0_kw.data[:] = chi0_wk_tail_corr.data.swapaxes(0, 1)

    del chi0_wk
    del chi0_wk_tail_corr

    assert (chi0_wnk.mesh.components[0] == bmesh)
    assert (chi0_wnk.mesh.components[1] == fmesh)
    assert (chi0_wnk.mesh.components[2] == kmesh)

    # -- Lattice BSE calc with built-in trace
    mpi.report('--> chi_kw from BSE')
    chi_kw = chiq_sum_nu_from_chi0q_and_gamma_PH(chi0_wnk, gamma_wnn)

    mpi.barrier()
    mpi.report('--> chi_kw from BSE (done)')

    del chi0_wnk

    mpi.report('--> chi_kw tail corrected (using chi0_wnk)')
    for k in kmesh:
        chi_kw[k, :] += dchi_wk[:, k]  # -- account for the high-frequency part of chi_0 (better than nothing)

    del dchi_wk

    mpi.report('--> solve_lattice_bse, done.')

    return chi_kw, chi0_kw
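# Illustrative driver for solve_lattice_bse above. Everything except the call
# signature is an assumption: g_wk (single-particle lattice GF on a Matsubara
# frequency x k-point product mesh) and gamma_wnn (local particle-hole vertex)
# are taken to come from a preceding DMFT/impurity calculation.
#
#   chi_kw, chi0_kw = solve_lattice_bse(g_wk, gamma_wnn, tail_corr_nwf=100)
#
# Choosing tail_corr_nwf larger than the fermionic window of the vertex
# improves the bare bubble chi0 at high frequencies; the difference dchi_wk is
# added back to the BSE result, as done at the end of the routine.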
def set_dc(self, dens_mat, U_interact, J_hund, orb=0, use_dc_formula=0, use_val=None):
    """Sets the double counting term for the inequivalent orbital orb:
       use_dc_formula = 0: LDA+U FLL double counting,
       use_dc_formula = 1: Held's formula,
       use_dc_formula = 2: AMF.
       Be sure that you use the correct interaction Hamiltonian!"""

    for icrsh in xrange(self.n_corr_shells):

        # iorb is the index of the inequivalent shell corresponding to icrsh:
        iorb = self.shellmap[icrsh]
        if (iorb != orb): continue  # only treat the requested orbital

        Ncr = {}
        l = self.corr_shells[icrsh][3]

        for j in xrange(len(self.gf_struct_corr[icrsh])):
            self.dc_imp[icrsh]['%s' % self.gf_struct_corr[icrsh][j][0]] = numpy.identity(l, numpy.float_)
            blname = self.gf_struct_corr[icrsh][j][0]
            Ncr[blname] = 0.0

        for a, al in self.gf_struct_solver[iorb]:
            bl = self.map_inv[iorb][a]
            Ncr[bl] += dens_mat[a].real.trace()

        M = self.corr_shells[icrsh][3]

        Ncrtot = 0.0
        a_list = [a for a, al in self.gf_struct_corr[icrsh]]
        for bl in a_list:
            Ncrtot += Ncr[bl]

        # average the densities if there is no spin polarisation:
        if (self.SP == 0):
            for bl in a_list:
                Ncr[bl] = Ncrtot / len(a_list)
        # correction for SO: we have only one block in this case, but in DC we need N/2:
        elif (self.SP == 1 and self.SO == 1):
            for bl in a_list:
                Ncr[bl] = Ncrtot / 2.0

        if (use_val is None):

            if (use_dc_formula == 0):  # FLL
                self.dc_energ[icrsh] = U_interact / 2.0 * Ncrtot * (Ncrtot - 1.0)
                for bl in a_list:
                    Uav = U_interact * (Ncrtot - 0.5) - J_hund * (Ncr[bl] - 0.5)
                    self.dc_imp[icrsh][bl] *= Uav
                    self.dc_energ[icrsh] -= J_hund / 2.0 * (Ncr[bl]) * (Ncr[bl] - 1.0)
                    mpi.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f" % locals())

            elif (use_dc_formula == 1):  # Held's formula, with U_interact the inter-orbital onsite interaction
                self.dc_energ[icrsh] = (U_interact + J_hund * (2.0 - (M - 1)) / (2 * M - 1)) / 2.0 * Ncrtot * (Ncrtot - 1.0)
                for bl in a_list:
                    Uav = (U_interact + J_hund * (2.0 - (M - 1)) / (2 * M - 1)) * (Ncrtot - 0.5)
                    self.dc_imp[icrsh][bl] *= Uav
                    mpi.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f" % locals())

            elif (use_dc_formula == 2):  # AMF
                self.dc_energ[icrsh] = 0.5 * U_interact * Ncrtot * Ncrtot
                for bl in a_list:
                    Uav = U_interact * (Ncrtot - Ncr[bl] / M) - J_hund * (Ncr[bl] - Ncr[bl] / M)
                    self.dc_imp[icrsh][bl] *= Uav
                    self.dc_energ[icrsh] -= (U_interact + (M - 1) * J_hund) / M * 0.5 * Ncr[bl] * Ncr[bl]
                    mpi.report("DC for shell %(icrsh)i and block %(bl)s = %(Uav)f" % locals())

            # output:
            mpi.report("DC energy for shell %s = %s" % (icrsh, self.dc_energ[icrsh]))

        else:

            a_list = [a for a, al in self.gf_struct_corr[icrsh]]
            for bl in a_list:
                self.dc_imp[icrsh][bl] *= use_val
            self.dc_energ[icrsh] = use_val * Ncrtot

            # output:
            mpi.report("DC for shell %(icrsh)i = %(use_val)f" % locals())
            mpi.report("DC energy = %s" % self.dc_energ[icrsh])
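# A standalone numerical check of the FLL branch (use_dc_formula == 0) above,
# purely illustrative: for N_up = N_down = 3 electrons in a d shell with
# U = 4.0 eV and J = 0.9 eV, the DC potential per spin block follows
# U*(N_tot - 0.5) - J*(N_sigma - 0.5), exactly as in the code.

U, J = 4.0, 0.9
Ncr = {'up': 3.0, 'down': 3.0}
Ntot = sum(Ncr.values())

E_dc = U / 2.0 * Ntot * (Ntot - 1.0)
for bl in ['up', 'down']:
    Uav = U * (Ntot - 0.5) - J * (Ncr[bl] - 0.5)
    E_dc -= J / 2.0 * Ncr[bl] * (Ncr[bl] - 1.0)
    print "FLL DC potential for block %s = %f" % (bl, Uav)  # 4.0*5.5 - 0.9*2.5 = 19.75
print "FLL DC energy = %f" % E_dc  # 60.0 - 2*2.7 = 54.6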
def solve(self, U_int, J_hund, T=None, verbosity=0, Iteration_Number=1, Test_Convergence=0.0001):
    """Calculation of the impurity Green's function using Hubbard-I."""

    if self.Converged:
        mpi.report("Solver %(name)s has already converged: SKIPPING" % self.__dict__)
        return

    if mpi.is_master_node():
        self.verbosity = verbosity
    else:
        self.verbosity = 0

    ur, umn, ujmn = self.__set_umatrix(U=U_int, J=J_hund, T=T)

    M = [x for x in self.G.mesh]
    self.zmsb = numpy.array([x for x in M], numpy.complex_)

    # initialise the template for the tails:
    tailtempl = {}
    for sig, g in self.G:
        tailtempl[sig] = copy.deepcopy(g.tail)
        for i in range(9):
            tailtempl[sig][i] *= 0.0

    self.__save_eal('eal.dat', Iteration_Number)

    mpi.report("Starting Fortran solver %(name)s" % self.__dict__)

    self.Sigma_Old <<= self.Sigma
    self.G_Old <<= self.G

    # call the Fortran solver:
    temp = 1.0 / self.beta
    gf, tail, self.atocc, self.atmag = gf_hi_fullu(e0f=self.ealmat, ur=ur, umn=umn, ujmn=ujmn,
                                                   zmsb=self.zmsb, nmom=self.Nmoments, ns=self.Nspin,
                                                   temp=temp, verbosity=self.verbosity)

    if (self.verbosity == 0):
        # No Fortran output, so give basic results here:
        mpi.report("Atomic occupancy in Hubbard I Solver  : %s" % self.atocc)
        mpi.report("Atomic magn. mom. in Hubbard I Solver : %s" % self.atmag)

    # transfer the data to the GF class:
    if (self.UseSpinOrbit):
        nlmtot = self.Nlm * 2  # only one block in this case!
    else:
        nlmtot = self.Nlm

    M = {}
    isp = -1
    for a, al in self.gf_struct:
        isp += 1
        M[a] = numpy.array(gf[isp * nlmtot:(isp + 1) * nlmtot,
                              isp * nlmtot:(isp + 1) * nlmtot, :]).transpose(2, 0, 1).copy()
        for i in range(min(self.Nmoments, 8)):
            tailtempl[a][i + 1] = tail[i][isp * nlmtot:(isp + 1) * nlmtot,
                                          isp * nlmtot:(isp + 1) * nlmtot]

    glist = lambda: [GfImFreq(indices=al, beta=self.beta, n_points=self.Nmsb)
                     for a, al in self.gf_struct]
    self.G = BlockGf(name_list=self.a_list, block_list=glist(), make_copies=False)

    self.__copy_Gf(self.G, M, tailtempl)

    # Self-energy:
    self.G0 <<= iOmega_n

    M = [self.ealmat[isp * nlmtot:(isp + 1) * nlmtot, isp * nlmtot:(isp + 1) * nlmtot]
         for isp in range((2 * self.Nlm) / nlmtot)]
    self.G0 -= M
    self.Sigma <<= self.G0 - inverse(self.G)

    # invert G0
    self.G0.invert()

    def test_distance(G1, G2, dist):
        def f(G1, G2):
            dS = max(abs(G1.data - G2.data).flatten())
            aS = max(abs(G1.data).flatten())
            return dS <= aS * dist
        return reduce(lambda x, y: x and y,
                      [f(g1, g2) for (i1, g1), (i2, g2) in izip(G1, G2)])

    mpi.report("\nChecking Sigma for convergence...\nUsing tolerance %s" % Test_Convergence)
    self.Converged = test_distance(self.Sigma, self.Sigma_Old, Test_Convergence)

    if self.Converged:
        mpi.report("Solver HAS CONVERGED")
    else:
        mpi.report("Solver has not yet converged")
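# Illustrative call of solve() above (hedged: S stands for an already
# constructed Hubbard-I solver instance with its effective atomic levels set;
# the U/J values are placeholders, not recommendations):
#
#   S.solve(U_int=6.0, J_hund=0.6, verbosity=1)
#   if S.Converged:
#       mpi.report("Hubbard-I Sigma converged to the requested tolerance.")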
def run_all(vasp_pid):
    """Runs the self-consistent DFT+DMFT cycle, synchronised with VASP via a lock file."""
    mpi.report("  Waiting for VASP lock to appear...")
    while not is_vasp_lock_present():
        time.sleep(1)

    vasp_running = True

    while vasp_running:
        if debug: print bcolors.RED + "rank %s" % (mpi.rank) + bcolors.ENDC
        mpi.report("  Waiting for VASP lock to disappear...")
        mpi.barrier()
        while is_vasp_lock_present():
            time.sleep(1)
            if not is_vasp_running(vasp_pid):
                mpi.report("  VASP stopped")
                vasp_running = False
                break

        if debug: print bcolors.MAGENTA + "rank %s" % (mpi.rank) + bcolors.ENDC
        err = 0
        exc = None
        try:
            if debug: print bcolors.BLUE + "plovasp: rank %s" % (mpi.rank) + bcolors.ENDC
            if mpi.is_master_node():
                plovasp.generate_and_output_as_text('plo.cfg', vasp_dir='./')
                # Read energy from OSZICAR
                dft_energy = get_dft_energy()
        except Exception, exc:
            err = 1

        err = mpi.bcast(err)
        if err:
            if mpi.is_master_node():
                raise exc
            else:
                raise SystemExit(1)

        mpi.barrier()

        try:
            if debug: print bcolors.GREEN + "rank %s" % (mpi.rank) + bcolors.ENDC
            corr_energy, dft_dc = dmft_cycle()
        except:
            if mpi.is_master_node():
                print "  master forwarding the exception..."
                raise
            else:
                print "  rank %i exiting..." % (mpi.rank)
                raise SystemExit(1)

        mpi.barrier()

        if mpi.is_master_node():
            total_energy = dft_energy + corr_energy - dft_dc
            print
            print "=" * 80
            print "  Total energy: ", total_energy
            print "  DFT energy:   ", dft_energy
            print "  Corr. energy: ", corr_energy
            print "  DFT DC:       ", dft_dc
            print "=" * 80
            print

        if mpi.is_master_node() and vasp_running:
            open('./vasp.lock', 'a').close()
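# The lock-file helpers used by run_all are not shown in this extract; the
# following is a minimal sketch of what they plausibly look like, under the
# assumption that the lock is the './vasp.lock' file which run_all itself
# re-creates at the end of each cycle. Both bodies are illustrative guesses.

import os

def is_vasp_lock_present():
    # VASP holds the lock while it is working on a charge-density step
    return os.path.isfile('./vasp.lock')

def is_vasp_running(vasp_pid):
    # signal 0 probes for the existence of a process without affecting it
    try:
        os.kill(vasp_pid, 0)
    except OSError:
        return False
    return True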
chemical_potential, dc_imp, dc_energ = SK.load(['chemical_potential', 'dc_imp', 'dc_energ'])
S.Sigma_iw << mpi.bcast(S.Sigma_iw)
chemical_potential = mpi.bcast(chemical_potential)
dc_imp = mpi.bcast(dc_imp)
dc_energ = mpi.bcast(dc_energ)
SK.set_mu(chemical_potential)
SK.set_dc(dc_imp, dc_energ)

for iteration_number in range(1, loops + 1):
    if mpi.is_master_node(): print "Iteration = ", iteration_number

    SK.symm_deg_gf(S.Sigma_iw, orb=0)                   # symmetrise Sigma
    SK.set_Sigma([S.Sigma_iw])                          # put Sigma into the SumK class
    chemical_potential = SK.calc_mu(precision=prec_mu)  # find the chemical potential for the given density
    S.G_iw << SK.extract_G_loc()[0]                     # calculate the local Green's function
    mpi.report("Total charge of Gloc : %.6f" % S.G_iw.total_density())

    # Initialise the DC term and the real part of Sigma, if no previous runs were found:
    if (iteration_number == 1 and previous_present == False):
        dm = S.G_iw.density()
        SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=dc_type)
        S.Sigma_iw << SK.dc_imp[0]['up'][0, 0]

    # Calculate the new G0_iw to input into the solver:
    if mpi.is_master_node():
        # We can mix Delta in order to stabilise the DMFT iterations:
        S.G0_iw << S.Sigma_iw + inverse(S.G_iw)
        ar = HDFArchive(dft_filename + '.h5', 'a')['dmft_output']
        if (iteration_number > 1 or previous_present):
            mpi.report("Mixing input Delta with factor %s" % delta_mix)
            Delta = (delta_mix * delta(S.G0_iw)) + (1.0 - delta_mix) * ar['Delta_iw']
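# The Delta update above is plain linear mixing. A self-contained toy version
# of the same update rule (illustrative only; 'old' and 'new' stand in for the
# previous and freshly computed hybridisation data):

delta_mix = 0.3
old, new = 1.00, 0.50
mixed = delta_mix * new + (1.0 - delta_mix) * old
print "mixed value:", mixed  # 0.3*0.5 + 0.7*1.0 = 0.85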
def convert_dft_input(self, first_real_part_matrix=True, only_upper_triangle=False, weights_in_file=False):
    """
    Reads the appropriate files and stores the data for the dft_subgrp in the hdf5 archive.

    Parameters
    ----------
    first_real_part_matrix : boolean, optional
        Should all the real components for a given k be read in first, followed by the imaginary parts?
    only_upper_triangle : boolean, optional
        Should only the upper triangular part of H(k) be read in?
    weights_in_file : boolean, optional
        Are the k-point weights to be read in?
    """

    # Read and write only on the master node
    if not (mpi.is_master_node()):
        return
    mpi.report("Reading input from %s..." % self.dft_file)

    # R is a generator: each R.next() returns the next number in the file
    R = ConverterTools.read_fortran_file(self, self.dft_file, self.fortran_to_replace)

    try:
        # the energy conversion factor is 1.0; we assume eV in the files
        energy_unit = 1.0
        # read the number of k points
        n_k = int(R.next())
        k_dep_projection = 0
        SP = 0  # no spin polarisation
        SO = 0  # no spin-orbit
        # total charge below the energy window is set to 0
        charge_below = 0.0
        # density required, for setting the chemical potential
        density_required = R.next()
        symm_op = 0  # no symmetry groups for the k-sum

        # the information on the non-correlated shells is needed for
        # defining the dimensions of the matrices:
        # number of shells considered in the Wanniers
        n_shells = int(R.next())  # corresponds to index R in the formulas
        # now read the information about the shells (atom, sort, l, dim):
        shell_entries = ['atom', 'sort', 'l', 'dim']
        shells = [{name: int(val) for name, val in zip(shell_entries, R)}
                  for ish in range(n_shells)]

        # number of correlated shells (e.g. Fe d, Ce f) in the unit cell
        n_corr_shells = int(R.next())  # corresponds to index R in the formulas
        # now read the information about the correlated shells (atom, sort, l, dim, SO flag, irep):
        corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
        corr_shells = [{name: int(val) for name, val in zip(corr_shell_entries, R)}
                       for icrsh in range(n_corr_shells)]

        # determine the number of inequivalent correlated shells and maps,
        # needed for further reading
        [n_inequiv_shells, corr_to_inequiv, inequiv_to_corr] = ConverterTools.det_shell_equivalence(self, corr_shells)

        use_rotations = 0
        rot_mat = [numpy.identity(corr_shells[icrsh]['dim'], numpy.complex_)
                   for icrsh in range(n_corr_shells)]
        rot_mat_time_inv = [0 for i in range(n_corr_shells)]

        # Representative representations are read from the file:
        n_reps = [1 for i in range(n_inequiv_shells)]
        dim_reps = [0 for i in range(n_inequiv_shells)]
        T = []
        for ish in range(n_inequiv_shells):
            # number of representatives ("subsets"), e.g. t2g and eg
            n_reps[ish] = int(R.next())
            dim_reps[ish] = [int(R.next()) for i in range(n_reps[ish])]  # dimensions of the subsets

            # The transformation matrix:
            # it is of dimension 2l+1 and is taken to be the standard d basis (as in Wien2k)
            ll = 2 * corr_shells[inequiv_to_corr[ish]]['l'] + 1
            lmax = ll * (corr_shells[inequiv_to_corr[ish]]['SO'] + 1)
            T.append(numpy.zeros([lmax, lmax], numpy.complex_))
            T[ish] = numpy.array([[0.0, 0.0, 1.0, 0.0, 0.0],
                                  [1.0 / sqrt(2.0), 0.0, 0.0, 0.0, 1.0 / sqrt(2.0)],
                                  [-1.0 / sqrt(2.0), 0.0, 0.0, 0.0, 1.0 / sqrt(2.0)],
                                  [0.0, 1.0 / sqrt(2.0), 0.0, -1.0 / sqrt(2.0), 0.0],
                                  [0.0, 1.0 / sqrt(2.0), 0.0, 1.0 / sqrt(2.0), 0.0]])

        # Spin blocks to be read:
        # number of spins to read for Norbs and Ham, NOT the projectors
        n_spin_blocs = SP + 1 - SO

        # define the number of n_orbitals for all k points: it is the
        # total number of bands and independent of k!
        n_orbitals = numpy.ones([n_k, n_spin_blocs], numpy.int) * sum([sh['dim'] for sh in shells])

        # Initialise the projectors:
        proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells,
                                max([crsh['dim'] for crsh in corr_shells]),
                                numpy.max(n_orbitals)], numpy.complex_)

        # Read the projectors from the file:
        for ik in range(n_k):
            for icrsh in range(n_corr_shells):
                for isp in range(n_spin_blocs):
                    # calculate the offset:
                    offset = 0
                    n_orb = 0
                    for ish in range(n_shells):
                        if (n_orb == 0):
                            if (shells[ish]['atom'] == corr_shells[icrsh]['atom']) and \
                               (shells[ish]['sort'] == corr_shells[icrsh]['sort']):
                                n_orb = corr_shells[icrsh]['dim']
                            else:
                                offset += shells[ish]['dim']

                    proj_mat[ik, isp, icrsh, 0:n_orb, offset:offset + n_orb] = numpy.identity(n_orb)

        # now define the arrays for the weights and the Hamiltonian ...
        # w(k_index), default normalisation
        bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k)
        hopping = numpy.zeros([n_k, n_spin_blocs,
                               numpy.max(n_orbitals), numpy.max(n_orbitals)], numpy.complex_)

        if (weights_in_file):
            # weights are in the file
            for ik in range(n_k):
                bz_weights[ik] = R.next()

        # if the sum over spins is in the weights, take it out again!
        sm = sum(bz_weights)
        bz_weights[:] /= sm

        # Grab the Hamiltonian:
        for isp in range(n_spin_blocs):
            for ik in range(n_k):
                n_orb = n_orbitals[ik, isp]

                if (first_real_part_matrix):
                    # first read all real components for a given k, then the imaginary parts
                    for i in range(n_orb):
                        if (only_upper_triangle):
                            istart = i
                        else:
                            istart = 0
                        for j in range(istart, n_orb):
                            hopping[ik, isp, i, j] = R.next()

                    for i in range(n_orb):
                        if (only_upper_triangle):
                            istart = i
                        else:
                            istart = 0
                        for j in range(istart, n_orb):
                            hopping[ik, isp, i, j] += R.next() * 1j
                            if (only_upper_triangle) and (i != j):
                                hopping[ik, isp, j, i] = hopping[ik, isp, i, j].conjugate()
                else:
                    # read a (real, imaginary) tuple for each entry
                    for i in range(n_orb):
                        if (only_upper_triangle):
                            istart = i
                        else:
                            istart = 0
                        for j in range(istart, n_orb):
                            hopping[ik, isp, i, j] = R.next()
                            hopping[ik, isp, i, j] += R.next() * 1j
                            if (only_upper_triangle) and (i != j):
                                hopping[ik, isp, j, i] = hopping[ik, isp, i, j].conjugate()

        # keep some things that we need for reading parproj:
        things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells',
                         'n_spin_blocs', 'n_orbitals', 'n_k', 'SO', 'SP', 'energy_unit']
        for it in things_to_set:
            setattr(self, it, locals()[it])

    except StopIteration:
        # a more explicit error if the file is corrupted:
        raise IOError("HK Converter : reading file %s failed!" % self.dft_file)

    R.close()

    # Save to the HDF5 archive:
    with HDFArchive(self.hdf_file, 'a') as ar:
        if not (self.dft_subgrp in ar):
            ar.create_group(self.dft_subgrp)
        things_to_save = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below',
                          'density_required', 'symm_op', 'n_shells', 'shells', 'n_corr_shells',
                          'corr_shells', 'use_rotations', 'rot_mat', 'rot_mat_time_inv', 'n_reps',
                          'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
                          'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
        for it in things_to_save:
            ar[self.dft_subgrp][it] = locals()[it]
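# Illustrative usage of convert_dft_input() above (hedged: assuming the method
# belongs to the HkConverter-style class of this package and that 'hamilt.hk'
# is a Hamiltonian file in the expected plain-text format; both names are
# placeholders):
#
#   Converter = HkConverter(filename='hamilt.hk')
#   Converter.convert_dft_input(first_real_part_matrix=True, only_upper_triangle=False)
#
# After the call, the dft_subgrp group of the HDF5 archive holds everything
# needed to construct a SumkDFT object from this file.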