def getAvgDispersion(parms, NthOrder_in, extra): int_module = __import__(share_fun.val_def(parms, 'INTEGRATE_MOD', 'integrate'), fromlist=[]); NthOrder = 3; rot_mat = extra['rot_mat']; NORB = int(parms['NORB']); N = int(parms['N_LAYERS']); F = int(parms['FLAVORS']); S = int(parms['SPINS']); bp,wf = extra['GaussianData']; if 'HR' in extra: out = array([int_module.calc_Havg(NthOrder, extra['HR'], extra['R'], float(share_fun.val_def(parms, 'H', 0))*(-1)**s, float(parms['DELTA']), bp, wf).reshape(NthOrder, NORB, NORB) for s in range(S)]); else: out = array([int_module.calc_Havg(NthOrder, extra['tight_binding_parms'], float(share_fun.val_def(parms, 'H', 0))*(-1)**s, float(parms['DELTA']), bp, wf).reshape(NthOrder, NORB, NORB) for s in range(S)]); ret = zeros((NthOrder, NORB)); rret = zeros((S, NthOrder, NORB)); for s in range(S): for n in range(NthOrder): for L in range(N): tmp = mat(out[s, n, L*F:(L+1)*F, L*F:(L+1)*F]); dtmp = diag(rot_mat[L]*tmp*rot_mat[L].H); if linalg.norm(dtmp.imag) > 1e-10: print 'getAvgDispersion: imaginary part is rather large: %g'%linalg.norm(dtmp.imag); ret[n, L*F:(L+1)*F] = dtmp.real; swap_vec = zeros((2, N*F), dtype = int); for L in range(N): for f in range(F): swap_vec[:,f*N+L] = array([f*N+L, L*F+f]); ret[:, swap_vec[0]] = ret[:, swap_vec[1]]; rret[s] = ret; # avg dispersion of uncorrelated orbitals (no need for matrix rotation) for s in range(S): for n in range(NthOrder): for f in range(N*F,NORB): rret[s, n, f] = out[s, n, f, f]; return rret[:, :NthOrder_in, :];
def getGavgFromSelfEnergy(parms, se_filename = None): N_LAYERS = int(parms['N_LAYERS']); FLAVORS = int(parms['FLAVORS']); SPINS = int(parms['SPINS']); parms['BETA'] = 10; # prepare data NMaxFreq = int(parms['N_MAX_FREQ']); if se_filename is not None: print 'load self energy from file: ', se_filename; tmp = genfromtxt(se_filename); if NMaxFreq > len(tmp): NMaxFreq = len(tmp); w = tmp[:,0] + 1j*float(parms['BROADENING']); tmp = tmp[:, 1:]; tmp = tmp[:NMaxFreq, 0::2] + 1j*tmp[:NMaxFreq, 1::2]; se = zeros((SPINS, NMaxFreq, N_LAYERS*FLAVORS), dtype = complex); for s in range(SPINS): for f in range(N_LAYERS*FLAVORS): se[s, :, f] = tmp[:, SPINS*f+s]; else: se = zeros((SPINS, NMaxFreq, N_LAYERS*FLAVORS), dtype = complex); w = linspace(float(parms['EMIN']), float(parms['EMAX']), NMaxFreq) + 1j*float(parms['BROADENING']); # tight binding Hamiltonian HR, R = getHamiltonian(parms['RHAM'], 4); parms['NORB'] = len(HR[0]) extra = { 'HR' : HR, 'R': R }; if int(val_def(parms, 'FORCE_DIAGONAL', 0)) > 0: print 'FORCE_DIAGONAL is used'; ind = nonzero(sum(R**2, 1)==0)[0][0]; H0 = HR[ind]; else: H0 = None; rot_mat = getRotationMatrix(N_LAYERS, FLAVORS, val_def(parms, 'ROT_MAT', None), H0); # prepare for k-integrate bp, wf = grule(int(parms['NUMK'])); extra.update({ 'GaussianData' : [bp, wf], 'rot_mat' : rot_mat }); delta = float(parms['DELTA']); mu = float(parms['MU']); # running Gavg = averageGreen(delta, mu, w, se, parms, -1, -1, 0, extra)[1]; # swap the Gavg to the format of my code # swap_vec = zeros((2, N_LAYERS*FLAVORS), dtype = int); # for L in range(N_LAYERS): # for f in range(FLAVORS): swap_vec[:,f*N_LAYERS+L] = array([f*N_LAYERS+L, L*FLAVORS+f]); # for s in range(SPINS): Gavg[s, :, swap_vec[1]] = Gavg[s, :, swap_vec[0]]; spec = -1/pi * Gavg.imag; if SPINS == 1: savetxt('spec.dat', c_[w.real, spec[0]]); elif SPINS > 1: savetxt('spec_up.dat', c_[w.real, spec[0]]); savetxt('spec_dn.dat', c_[w.real, spec[1]]);
def get_asymp_selfenergy(parms, nf_in, nn_in = None):
    """Compute the first two high-frequency expansion coefficients of the
    self energy, Sigma ~ S[0] + S[1]/(iwn), from densities <n> and <nn>.

    Returns an array of shape (SPINS, 2, FLAVORS).
    """
    dmft_id = system.getDMFTCorrIndex(parms, all = False)
    FLAVORS = int(parms['FLAVORS'])
    SPINS = 2  # work with both spins; reduce at the end if parms['SPINS'] == 1
    U = generate_Umatrix(float(parms['U']), float(parms['J']), int(parms['FLAVORS']),
                         val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori'))

    # Held-type double counting: zero out interaction with non-DMFT orbitals
    if int(val_def(parms, 'TMP_HELD_DC' , 0)) > 0:
        for m in range(2*FLAVORS):
            for n in range(2*FLAVORS):
                f1 = m/2
                f2 = n/2
                if (f1 not in dmft_id) or (f2 not in dmft_id):
                    U[m, n] = 0.

    # spin-resolved densities, interleaved (up, dn, up, dn, ...)
    nf = zeros(SPINS*FLAVORS)
    nf[::2] = nf[1::2] = nf_in[0]
    if int(parms['SPINS']) == 2:
        nf[1::2] = nf_in[1]

    # density-density correlations <n_i n_j>; packed lower triangle in nn_in
    nn = zeros((FLAVORS*SPINS, FLAVORS*SPINS))
    pos = 0
    for i in range(FLAVORS*SPINS):
        for j in range(i+1):
            f1 = i/SPINS
            f2 = j/SPINS
            if f1 in dmft_id and f2 in dmft_id and nn_in is not None:
                nn[i,j] = nn[j,i] = nn_in[pos]
                pos += 1
        # diagonal: <n_i n_i> = <n_i>
        if f1 in dmft_id:
            nn[i,i] = nf[i]

    S = zeros((2, SPINS*FLAVORS))  # 2: expansion orders: (iwn)^0, (iwn)^{-1}
    for f in range(SPINS*FLAVORS):
        # zeroth order is easy: \Sigma^0_f = U_{f, f'} * <n_f'>
        S[0, f] = sum(U[f, :]*nf)
        # first order is harder: \Sigma^1_f = U_{f,f1}*U_{f,f2}*<n_f1 n_f2> - (\Sigma^0_f)^2
        for f1 in range(SPINS*FLAVORS):
            for f2 in range(SPINS*FLAVORS):
                S[1, f] += U[f, f1]*U[f,f2]*nn[f1,f2]
        S[1,f] -= S[0,f]**2

    ret = array([S[:,::2], S[:,1::2]])
    # for mean field, there is only \Sigma^0, other terms vanish
    # so I set \Sigma^1 to be zero
    for f in range(FLAVORS):
        if f not in dmft_id:
            ret[:, 1, f] = 0
            if int(val_def(parms, 'TMP_HELD_DC' , 0)) > 0:
                # Held-formula double counting for the inert bands
                uu = float(parms['U'])
                jj = float(parms['J'])
                ntot = sum(nf_in[0][dmft_id] + nf_in[1][dmft_id])
                ret[:, 0, f] = ((uu-2*jj) + jj*(2 - (3-1)) / (2*3.-1.))*(ntot-0.5)

    if int(parms['SPINS']) == 1:
        ret = array([ret[0]])
    return ret
def getGavgFromSelfEnergy(parms, se_filename = None): N_LAYERS = int(parms['N_LAYERS']) FLAVORS = int(parms['FLAVORS']) SPINS = int(parms['SPINS']) beta = float(parms['BETA']) # prepare data NMaxFreq = int(round((beta*float(parms['MAX_FREQ'])/pi - 1)/2.)) iwn = 1j * (2*arange(NMaxFreq)+1)*pi/beta if se_filename is not None: print 'load self energy from file: ', se_filename; tmp = genfromtxt(se_filename); if NMaxFreq > len(tmp): NMaxFreq = len(tmp); tmp = tmp[:, 1:] tmp = tmp[:NMaxFreq, 0::2] + 1j*tmp[:NMaxFreq, 1::2] se = zeros((SPINS, NMaxFreq, N_LAYERS*FLAVORS), dtype = complex) for s in range(SPINS): for f in range(N_LAYERS*FLAVORS): se[s, :, f] = tmp[:NMaxFreq, SPINS*f+s] else: se = zeros((SPINS, NMaxFreq, N_LAYERS*FLAVORS), dtype = complex) # tight binding Hamiltonian HR, R = getHamiltonian(parms['RHAM'], 4); parms['NORB'] = len(HR[0]) extra = { 'HR' : HR, 'R': R }; if int(val_def(parms, 'FORCE_DIAGONAL', 0)) > 0: print 'FORCE_DIAGONAL is used'; ind = nonzero(sum(R**2, 1)==0)[0][0]; H0 = HR[ind]; else: H0 = None; rot_mat = getRotationMatrix(N_LAYERS, FLAVORS, val_def(parms, 'ROT_MAT', None), H0); # prepare for k-integrate bp, wf = grule(int(parms['NUMK'])); extra.update({ 'GaussianData' : [bp, wf], 'rot_mat' : rot_mat }); delta = float(parms['DELTA']); mu = float(parms['MU']); # running SelfEnergy_rot = array([irotate(se[s], rot_mat) for s in range(SPINS)]) SE = array([array([s.flatten() for s in SelfEnergy_rot[n]]) for n in range(SPINS)]) Gavg = integrate(iwn, delta, mu, SE, parms, extra, parallel = False) g = array([rotate_green(Gavg, rot_mat, layer=L) for L in range(N_LAYERS)]) return iwn, g
def get_inert_band_HF(parms, nf):
    """Hartree-Fock contribution to the self energy from the inert (non-DMFT)
    bands, per spin.  Returns an array of shape (SPINS,).

    Change: the opposite-spin index was written `nf[not s, f]` (a bool used as
    an index); replaced with the equivalent but explicit `nf[1 - s, f]`.
    """
    FLAVORS = int(parms["FLAVORS"])
    SPINS = 2
    ret = zeros(SPINS)
    # Held-type double counting handles the inert bands elsewhere
    if int(val_def(parms, "TMP_HELD_DC", 0)) > 0:
        return ret
    assert size(nf, 1) == FLAVORS
    dmft_id = system.getDMFTCorrIndex(parms, all=False)
    inert_id = array([s for s in range(FLAVORS) if s not in dmft_id])
    U = float(parms["U"])
    J = float(parms["J"])
    if len(nf) == 1:
        nf = r_[nf, nf]  # paramagnetic input: duplicate the single spin
    for s in range(SPINS):
        for f in inert_id:
            # opposite-spin density couples via U-2J, same-spin via U-3J
            ret[s] += (U - 2 * J) * nf[1 - s, f] + (U - 3 * J) * nf[s, f]
    if int(val_def(parms, "MEAN_FIELD_UNPOLARIZED", 0)) > 0:
        ret = ones(SPINS) * mean(ret)
    return ret
def getSymmetricLayers(tmph5, parms):
    """Return the layer-symmetry table (cached in `tmph5`), or None when layer
    symmetry is disabled or no symmetric layers exist."""
    if int(val_def(parms, "USE_LAYER_SYMMETRY", 0)) == 0:
        return None
    if "sym_layers" in tmph5:
        # already computed in a previous run: read it back
        return tmph5["sym_layers"][:]
    sym_layers = system.calc_sym_layers(parms)
    if len(sym_layers) == 0:
        return None
    # cache for later restarts
    tmph5.create_dataset("sym_layers", sym_layers.shape, dtype=sym_layers.dtype,
                         data=sym_layers)
    return sym_layers
def prepare(self, prefix, in_data): # prepare hybtau file for CTQMC print 'Prepare running solver for ' + prefix; self.prefix = prefix; self.list_obs = None; self.parms = in_data['parms']; self.MEASURE_freq = int(val_def(in_data['parms'], 'MEASURE_freq', 1)); parms = in_data['parms']; FLAVORS = int(parms['FLAVORS']); # prepare parms file for CTQMC QMC_parms = { 'SEED' : random.random_integers(10000), 'SWEEPS' : int(val_def(parms, 'SWEEPS', 500000)), 'THERMALIZATION' : int(val_def(parms, 'THERMALIZATION', 300)), 'N_TAU' : int(parms['N_TAU']), 'N_HISTOGRAM_ORDERS' : int(val_def(parms, 'N_ORDER', 50)), 'N_MEAS' : int(val_def(parms, 'N_MEAS', 100)), 'N_CYCLES' : int(val_def(parms, 'N_CYCLES', 30)), 'BETA' : float(parms['BETA']), 'U_MATRIX' : self.prefix+'.Umatrix', 'MU_VECTOR' : self.prefix+'.MUvector', 'BASENAME' : prefix, 'DELTA' : prefix + '.hybtau', 'N_ORBITALS' : FLAVORS, 'MEASURE_freq' : self.MEASURE_freq, 'N_MATSUBARA' : int(parms['N_CUTOFF']), 'MAX_TIME' : val_def(parms, 'MAX_TIME', 80000), }; self.Norder = QMC_parms['N_HISTOGRAM_ORDERS']; solver_parms_file = open(prefix + '.parms', 'w'); for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n'); # Umatrix: either Slater-Kanamori form or using Slater integrals Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']), FLAVORS/2, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori')); hyb_tau = in_data['hybtau']; hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau]; savetxt(prefix+'.hybtau', hyb_tau); savetxt(self.prefix+'.Umatrix', Umatrix); savetxt(self.prefix+'.MUvector', in_data['MU']);
def getDensity(h5, it = None):
    """Return the per-spin orbital occupations for iteration `it` (latest
    iteration when None), shape (SPINS, NCOR).

    For an interacting run the DMFT orbitals are refreshed from -G(tau=beta)
    of the current iteration's impurity solution.
    """
    if it is None:
        it = h5['iter'][0]
    parms = load_parms(h5, it)
    N_LAYERS = int(parms['N_LAYERS'])
    SPINS = int(parms['SPINS'])
    NCOR = int(parms['NCOR'])
    U = float(parms['U'])

    # density logged at the previous iteration (or iteration 0 at startup)
    source_it = 0 if it == 0 else it - 1
    n_f = h5['log_density'][source_it, 4:].reshape(SPINS, -1)[:, :NCOR]

    if U != 0 and it > 0:
        if int(val_def(parms, 'FIXED_HARTREE', 0)) > 0:
            # keep the Hartree density frozen at its initial value
            n_f = h5['log_density'][0, 4:].reshape(SPINS, -1)[:, :NCOR]
        dmft_id = system.getDMFTCorrIndex(parms)
        gtau = h5['SolverData/Gtau/'+str(it)][:]
        # n = -G(beta^-) for the correlated orbitals
        n_f[:, dmft_id] = -gtau[:, -1, dmft_id]
    return n_f
def prepare(self, prefix, in_data): # prepare hybtau file for CTQMC print 'Prepare running solver for ' + prefix; self.prefix = prefix; parms = in_data['parms']; hyb_tau = in_data['hybtau']; FLAVORS = int(parms['FLAVORS']); for f in range(FLAVORS): hyb_tau[:, f] = -hyb_tau[::-1, f]; hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau]; savetxt(prefix+'.hybtau', hyb_tau); if FLAVORS/2 == 3: Lattice = '"t2g system"'; if FLAVORS/2 == 2: Lattice = '"eg system"'; if FLAVORS/2 == 1: Lattice = '"site"'; # prepare parms file for CTQMC green_only = 1; self.list_obs = None; if int(parms['MEASURE']) > 0: green_only = 0 self.list_obs = parms['OBSERVABLES'].split(',') QMC_parms = { 'LATTICE_LIBRARY' : user_config.LatticeLibrary, 'LATTICE' : Lattice, 'MODEL_LIBRARY' : user_config.ModelLibrary, 'MODEL' : user_config.Model, 'L' : FLAVORS/2, 'SITES' : FLAVORS/2, 'GREEN_ONLY' : green_only, 'SEED' : random.random_integers(10000), 'SWEEPS' : val_def(parms, 'SWEEPS', 500000), 'THERMALIZATION' : val_def(parms, 'THERMALIZATION', 300), 'N' : parms['N_TAU'], 'N_ORDER' : val_def(parms, 'N_ORDER', 50), 'N_MEAS' : val_def(parms, 'N_MEAS', 200), 'N_SHIFT' : val_def(parms, 'N_SHIFT', 0), 'N_SWAP' : val_def(parms, 'N_SWAP', 0), 'BETA' : parms['BETA'], 'U' : parms['U'], "U'" : float(parms['U']) - 2*float(parms['J']), 'J' : parms['J'], 'SPINS' : 2, 'CONSERVED_QUANTUMNUMBERS': '"Nup, Ndown"', 'F' : prefix + '.hybtau' }; for f in range(FLAVORS/2): QMC_parms['MUUP'+str(f)] = in_data['MU'][2*f]; QMC_parms['MUDOWN'+str(f)] = in_data['MU'][2*f+1]; solver_parms_file = open(prefix + '.parms', 'w'); for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n'); solver_parms_file.write('{}'); solver_parms_file.close();
def smooth_selfenergy(it, h5, SelfEnergy, nf):
    """Replace the high-frequency part of the self energy by its asymptotic
    expansion and log the asymptotic coefficients.

    The coefficients come from one of three sources, in priority order:
    solver-measured tails (USE_SELFENERGY_TAIL), a tail fit around N_CUTOFF
    (FIT_SELFENERGY_TAIL, default), or the analytic formula alone.
    """
    parms = load_parms(h5, it)
    N_LAYERS = int(parms['N_LAYERS'])
    FLAVORS = int(parms['FLAVORS'])
    SPINS = size(nf, 0)  # this SPINS may be different from parms['SPINS']
    NCOR = int(parms['NCOR'])

    # calculate asymptotic coeffs (orders 0 and -1) analytically, layer by layer
    se_coefs = zeros((SPINS, 2, NCOR), dtype = float)
    for L in range(N_LAYERS):
        st = 'SolverData/Observables/%d/L%d'%(it, L)
        try:
            nn = h5[st+'/nn'][:]
        except:
            nn = None  # <nn> not measured for this layer
        se_coefs[:, :, L::N_LAYERS] = get_asymp_selfenergy(parms, nf[:, L::N_LAYERS], nn)

    if int(val_def(parms, 'USE_SELFENERGY_TAIL', 0)) > 0:
        # use the tail measured by the solver instead of the analytic formula
        minorder = 0
        se_coefs = None
        for L in range(N_LAYERS):
            st = 'SolverData/Observables/%d/L%d'%(it, L)
            se_tail = h5[st+'/SelfEnergyTail'][:]
            minorder = se_tail[0, 0]   # first/last rows store the order range
            maxorder = se_tail[-1, 0]
            se_tail = se_tail[1:-1]
            if se_coefs is None:
                se_coefs = zeros((SPINS, maxorder-minorder+1, NCOR))
            for n in range(len(se_tail)):
                tail = se_tail[n].reshape(-1, 2)
                if SPINS == 1:
                    tail = [mean(tail, 1)]
                for s in range(SPINS):
                    se_coefs[s, n, L::N_LAYERS] = tail[s]
    elif int(parms.get('FIT_SELFENERGY_TAIL', 1)) > 0:
        # fit the 1/iwn coefficient from Im(Sigma) around the cutoff frequency
        n_max_freq = int(parms['N_MAX_FREQ'])
        n_cutoff = int(parms['N_CUTOFF'])
        n_fit_stop = n_cutoff + 5
        n_fit_start = n_cutoff - 5
        wn = (2*arange(n_max_freq)+1)*pi/float(parms['BETA'])
        for f in range(NCOR):
            for s in range(SPINS):
                x_fit = wn[n_fit_start:n_fit_stop]
                y_fit = x_fit*SelfEnergy[s, n_fit_start:n_fit_stop, f].imag
                p = polyfit(x_fit, y_fit, 0)
                se_coefs[s, 1, f] = -p[0]

    log_data(h5['SolverData'], 'selfenergy_asymp_coeffs', it, se_coefs.flatten(), data_type = float)
    list_NCutoff = ones((SPINS, NCOR), dtype = int)*int(parms['N_CUTOFF'])
    # enforce causality: Im(Sigma) must not be positive
    ind = SelfEnergy.imag > 0
    SelfEnergy[ind] = real(SelfEnergy[ind])
    return smooth(SelfEnergy, se_coefs, int(parms['N_MAX_FREQ']), float(parms['BETA']),
                  list_NCutoff, minorder = 0)
def prepare(self, prefix, in_data): print 'Prepare running solver for ' + prefix; self.prefix = prefix; parms = in_data['parms']; BETA = float(parms['BETA']); NCOR = int(parms['FLAVORS']) / 2; self.beta = BETA; self.Ntau = int(parms['N_TAU']) + 1; self.Ncor = NCOR; self.measure = int(parms['MEASURE']) hyb_mat = in_data['hybmat']; hyb_tail = in_data['hybtail']; wn = (2*arange(size(hyb_mat, 0))+1)*pi/BETA; savetxt(prefix+'.hybmat.real', c_[wn, hyb_mat.real]); savetxt(prefix+'.hybmat.imag', c_[wn, hyb_mat.imag]); savetxt(prefix+'.hybmat.tail', hyb_tail); savetxt(prefix+'.MUvector', in_data['MU']); Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']), NCOR, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori')); savetxt(prefix+'.Umatrix', Umatrix); # prepare parms file for CTQMC QMC_parms = { 'SWEEPS_EACH_NODE' : int(val_def(parms, 'SWEEPS', 500000))/self.args['np'], 'THERMALIZATION' : val_def(parms, 'THERMALIZATION', 50000), 'N_MEAS' : val_def(parms, 'N_MEAS', 100), 'BETA' : parms['BETA'], 'U_MATRIX' : prefix+'.Umatrix', 'MU_VECTOR' : prefix + '.MUvector', 'HYB_MAT' : prefix + '.hybmat', 'NCOR' : NCOR, 'HDF5_OUTPUT' : prefix + '.solution.h5', 'N_LEGENDRE' : val_def(parms, 'TRIQS_N_LEGENDRE', 50), 'ACCUMULATION' : val_def(parms, 'TRIQS_ACCUMULATION', 'legendre'), 'SPINFLIP' : val_def(parms, 'TRIQS_SPINFLIP', 1), 'MEASURE' : self.measure, }; solver_parms_file = open(prefix + '.parms', 'w'); for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n');
def init_solver(parms, np): solver_type = parms['SOLVER_TYPE']; print '%s solver is used...'%solver_type; input_args = { 'solver_path' : parms.get('SOLVER_EXE_PATH', ''), 'mpirun_path' : parms.get('SOLVER_MPIRUN_PATH', user_config.mpirun), 'np' : np } if solver_type == 'CTHYB_Matrix': input_args['parm2xml'] = val_def(parms, 'PARMS2XML', user_config.parm2xml); input_args['solver_path'] = user_config.solver_matrix; solver = HybridizationMatrixSolver(input_args); elif solver_type == 'CTHYB_Segment': input_args['solver_path'] = user_config.solver_segment; solver = HybridizationSegmentSolver(input_args); elif solver_type == 'TRIQS': input_args['solver_path'] = user_config.solver_triqs; solver = TRIQSSolver(input_args); elif solver_type == 'TRIQSOld': input_args['solver_path'] = user_config.solver_triqs_old; solver = TRIQSSolverOld(input_args); else: print 'Solver %s unknown'%solver_type; return solver;
def getSpectraFromSelfEnergy(h5, se_filename, rham, rotmat, numk = None, setail_filename = None, it = 0): # prepare data w, se_refreq = ProcessSelfEnergy(se_filename, emin = -5, emax = 5, NFreq = 500); it = h5['iter'][0] - it; parms = load_parms(h5, it); print 'work on iteration ', it; if rham is not None: print 'new path for rham file is: ', rham; parms['RHAM'] = rham; if rotmat is not None: print 'new path for rot_mat file is ', rotmat; parms['ROT_MAT'] = rotmat; BETA = float(parms['BETA']); N_LAYERS = int(parms['N_LAYERS']); FLAVORS = int(parms['FLAVORS']); SPINS = int(parms['SPINS']); NORB = int(parms['NORB']); dmft_id = system.getDMFTCorrIndex(parms, all = False); dmft_id_len = len(dmft_id); # get the se tails tmp = h5['SolverData/selfenergy_asymp_coeffs'][:]; se_tail = tmp[tmp[:,0] == it, 1:].reshape(SPINS, 2, -1)[:, :, ::N_LAYERS]; if setail_filename is not None: print 'use the tail from external source: ', setail_filename; tmp = genfromtxt(setail_filename); se_tail[:, :, dmft_id] = array([tmp[:, s::SPINS] for s in range(SPINS)]); print se_tail; # restore SelfEnergy se = zeros((SPINS, len(se_refreq), N_LAYERS*FLAVORS), dtype = complex); for s in range(SPINS): for f in range(N_LAYERS*FLAVORS): if f/N_LAYERS not in dmft_id: se[s,:,f] = se_tail[s, 0, f/N_LAYERS]; else: f1 = nonzero(f/N_LAYERS == dmft_id)[0][0]; se[s, :, f] = se_refreq[:, SPINS*f1+s]*se_tail[s, 1, f/N_LAYERS] + se_tail[s, 0, f/N_LAYERS]; # tight binding Hamiltonian if 'RHAM' in parms: HR, R = getHamiltonian(parms['RHAM'], 4); if parms['DTYPE'] == '3bands': FLAVORS = 3; extra = { 'HR' : HR, 'R': R }; # rotation matrix if int(val_def(parms, 'FORCE_DIAGONAL', 0)) > 0: print 'FORCE_DIAGONAL is used'; ind = nonzero(sum(R**2, 1)==0)[0][0]; H0 = HR[ind]; else: H0 = None; rot_mat = getRotationMatrix(N_LAYERS, FLAVORS, val_def(parms, 'ROT_MAT', None), H0); # prepare for k-integrate parms['NUMK'] = 16 if numk is None else numk; bp, wf = grule(int(parms['NUMK'])); broadening = 0.01; extra.update({ 
'GaussianData' : [bp, wf], 'rot_mat' : rot_mat }); delta = float(parms['DELTA']); mu = float(parms['MU']); # running print 'generating interacting DOS with parameters' for k, v in parms.iteritems(): print '%s = %s'%(k, v); Gr = averageGreen(delta, mu, w+1j*broadening, se, parms, float(parms['ND']), float(parms['DENSITY']), 0, extra)[1]; if SPINS == 1: savetxt(parms['ID']+'.idos', c_[w, -1/pi*Gr[0].imag], fmt = '%g'); elif SPINS == 2: savetxt(parms['ID']+'_up.idos', c_[w, -1/pi*Gr[0].imag], fmt = '%g'); savetxt(parms['ID']+'_dn.idos', c_[w, -1/pi*Gr[1].imag], fmt = '%g'); # calculate original G(iwn), only consider one "LAYERS" Giwn_orig = h5['ImpurityGreen/%d'%it][:,:,::N_LAYERS]; NMatsubara = size(Giwn_orig, 1); wn = (2*arange(NMatsubara) + 1)*pi/BETA; Giwn = zeros((NMatsubara, 2*FLAVORS*SPINS), dtype = float); # 2 for real and imag for f in range(FLAVORS): for s in range(SPINS): Giwn[:, 2*(SPINS*f+s)] = Giwn_orig[s, :, f].real; Giwn[:, 2*(SPINS*f+s)+1] = Giwn_orig[s, :, f].imag; savetxt(parms['ID']+'.gmat', c_[wn, Giwn]); # calculate G(iwn) for reference, only consider one "LAYERS" NMatsubara = 200; wn = (2*arange(NMatsubara) + 1)*pi/BETA; Giwn = zeros((NMatsubara, 2*FLAVORS*SPINS), dtype = float); # 2 for real and imag for f in range(FLAVORS): for s in range(SPINS): A = -1/pi * Gr[s, :, f*N_LAYERS].imag; for n in range(NMatsubara): tck_re = splrep(w, real(A / (1j*wn[n] - w))); tck_im = splrep(w, imag(A / (1j*wn[n] - w))); Giwn[n, 2*(SPINS*f+s)] = splint(w[0], w[-1], tck_re); Giwn[n, 2*(SPINS*f+s)+1] = splint(w[0], w[-1], tck_im); savetxt(parms['ID']+'.gmat.ref', c_[wn, Giwn]);
def run_solver(AvgDispersion, nf, w, it, parms, aWeiss, np=1, VCoulomb=None): ID = parms["ID"] N_LAYERS = int(parms["N_LAYERS"]) FLAVORS = int(parms["FLAVORS"]) SPINS = int(parms["SPINS"]) DATA_FILE = parms["DATA_FILE"] TMPH5FILE = "." + DATA_FILE + ".id" + str(ID) + ".i" + str(it) + ".solver_out.h5" if VCoulomb is None: VCoulomb = zeros(N_LAYERS) solver = solver_types.init_solver(parms, np) corr_id = system.getCorrIndex(parms) NCOR = int(parms["NCOR"]) NDMFT = 2 * len(system.getDMFTCorrIndex(parms)) # 2 for SPINS # check save point and initialize for new iteration try: tmph5 = h5py.File(TMPH5FILE, "r+") hyb_tau = tmph5["Hybtau"][:] hyb_mat = tmph5["Hybmat"][:] hyb_coefs = tmph5["hyb_asym_coeffs"][:].reshape(SPINS, -1, NCOR) except: try: tmph5.close() except: pass tmph5 = h5py.File(TMPH5FILE, "w") tmph5.create_dataset("L", (2,), dtype=int, data=array([it, 0])) # asymptotic coefficients, upto 3rd order for hyb hyb_coefs = zeros((SPINS, 3, NCOR), dtype=float) # electric chemical potential eMU = float(parms["MU"]) - VCoulomb for L in range(N_LAYERS): hyb_coefs[:, :, L::N_LAYERS] = get_asymp_hybmat( parms, nf[:, L::N_LAYERS], eMU[L], AvgDispersion[:, :, corr_id[L:NCOR:N_LAYERS]] ) # get practical hybmat, and hybtau Eav = AvgDispersion[:, 0, corr_id] hyb_mat = zeros((SPINS, int(parms["N_MAX_FREQ"]), NCOR), dtype=complex) hyb_tau = zeros((SPINS, int(parms["N_TAU"]) + 1, NCOR), dtype=float) for s in range(SPINS): for f in range(NCOR): hyb_mat[s, :, f] = w + eMU[f % N_LAYERS] - Eav[s, f] - aWeiss[s, :, f] tmp = cppext.IFT_mat2tau( hyb_mat[s, :, f].copy(), int(parms["N_TAU"]) + 1, float(parms["BETA"]), float(hyb_coefs[s, 0, f]), float(hyb_coefs[s, 1, f]), ) # set value >= 0 to be smaller than 0, the mean of left and right neighbors ind = nonzero(tmp >= 0)[0] for i in ind: lefti = righti = i while tmp[lefti] >= 0 and lefti > 0: lefti -= 1 while tmp[righti] >= 0 and righti < len(tmp) - 1: righti += 1 leftval = tmp[lefti] if tmp[lefti] < 0 else 0 rightval = tmp[righti] if 
tmp[righti] < 0 else 0 tmp[i] = (leftval + rightval) / 2.0 hyb_tau[s, :, f] = tmp tmph5.create_dataset("Hybmat", hyb_mat.shape, dtype=complex, data=hyb_mat) tmph5.create_dataset("Hybtau", hyb_tau.shape, dtype=float, data=hyb_tau) # initialize output dataset Gtau_shape = (int(parms["N_TAU"]) + 1, NDMFT) tmph5.create_dataset("Gtau", Gtau_shape, dtype=float, data=zeros(Gtau_shape, dtype=float)) tmph5.create_group("Observables") tmph5.create_dataset("hyb_asym_coeffs", hyb_coefs.flatten().shape, dtype=float, data=hyb_coefs.flatten()) # run hyb_data = [hyb_tau, hyb_mat, hyb_coefs] MEASURE_freq = True if "Gw" in tmph5 else False startL = tmph5["L"][1] sym_layers = getSymmetricLayers(tmph5, parms) for L in range(startL, N_LAYERS): print "Processing task ", ID, ": iteration ", it, ", layer ", L tmph5["L"][1] = L TMPFILE = "." + DATA_FILE + ".id" + str(ID) + ".i" + str(it) + ".L" + str(L) if float(parms["U"]) == 0: break if (sym_layers is None) or (L not in sym_layers[:, 1]): solver.prepare(TMPFILE, solver_input_data(parms, L, hyb_data, AvgDispersion, VCoulomb, nf)) tmph5.close() ret_val = solver.run() tmph5 = h5py.File(TMPH5FILE, "r+") if ret_val > 0: print "Not finish running impurity solver or problem occurs while running the solver." 
os.system("rm " + TMPFILE + ".*") tmph5.close() return None solver_out = solver.collect() if solver_out is None: tmph5.close() return None Gtau = solver_out[0] obs = solver_out[1] if len(solver_out) > 2: MEASURE_freq = True Giwn = solver_out[2] Siwn = solver_out[3] os.system("rm " + TMPFILE + ".*") elif L in sym_layers[:, 1]: # symmetric layer, no need to calculate sym_index = nonzero(sym_layers[:, 1] == L)[0] sym_L = sym_layers[sym_index, 0][0] print "L=%d is the symmetric layer of layer L=%d" % (L, sym_L) Gtau = tmph5["Gtau"][:, sym_L::N_LAYERS] obs = None if tmph5["Observables"].keys() != []: obs = dict() for k, v in tmph5["Observables/L" + str(sym_L)].iteritems(): obs[k] = v if MEASURE_freq: Giwn = tmph5["Gw"][:, sym_L::N_LAYERS] Siwn = tmph5["Sw"][:, sym_L::N_LAYERS] # this is the only place for AFM # only works for the 4-cell unitcell (GdFeO3 distortion) # G-type AFM: 0-3 are the same, 0-1 and 0-2 are opposite in spin # here I just swap values of opposite spins if int(val_def(parms, "AFM", 0) > 0) and (L in [1, 2]): print "AFM processing on this L" Ntmp = NDMFT / N_LAYERS # correlated bands per site mapid = zeros(Ntmp, dtype=int) mapid[0::2] = arange(1, Ntmp, 2) mapid[1::2] = arange(0, Ntmp, 2) Gtau = Gtau[:, mapid] if MEASURE_freq: Giwn = Giwn[:, mapid] Siwn = Siwn[:, mapid] if "nn" in obs: tmp = obs["nn"][:] nn = zeros((Ntmp, Ntmp), dtype=float) pos = 0 for i in range(Ntmp): for j in range(i + 1): nn[i, j] = nn[j, i] = tmp[pos] pos += 1 nn = nn[mapid] nn = nn[:, mapid] tmp = array([]) for i in range(Ntmp): for j in range(i + 1): tmp = r_[tmp, nn[i, j]] obs["nn"] = tmp tmph5["Gtau"][:, L::N_LAYERS] = Gtau if MEASURE_freq: if "Gw" not in tmph5: matsubara_shape = (len(Giwn), NDMFT) tmph5.create_dataset("Gw", matsubara_shape, dtype=complex, data=zeros(matsubara_shape, dtype=complex)) tmph5.create_dataset("Sw", matsubara_shape, dtype=complex, data=zeros(matsubara_shape, dtype=complex)) tmph5["Gw"][:, L::N_LAYERS] = Giwn tmph5["Sw"][:, L::N_LAYERS] = Siwn if obs 
is not None: new_group_str = "Observables/L" + str(L) tmph5.create_group(new_group_str) for k, v in obs.iteritems(): tmph5.create_dataset(new_group_str + "/" + k, v.shape, dtype=v.dtype, data=v) print "Finish iteration ", it, ", layer ", L, "\n" print "DONE: iteration %d\n" % it tmph5["L"][1] = N_LAYERS tmph5.close() return TMPH5FILE
def solver_post_process(parms, aWeiss, h5, tmph5filename): N_LAYERS = int(parms["N_LAYERS"]) FLAVORS = int(parms["FLAVORS"]) NCOR = int(parms["NCOR"]) SPINS = 2 # NOTE: for collecting all spins, symmetrize them later if neccessary if len(aWeiss) == 1: aWeiss = r_[aWeiss, aWeiss] # SPINS = 1 case dmft_id = system.getDMFTCorrIndex(parms) if not os.path.isfile(tmph5filename): print >> sys.stderr, "File %s not found" % tmph5filename return None tmph5 = h5py.File(tmph5filename, "r") if tmph5["L"][1] < N_LAYERS: print >> sys.stderr, "Unfinish solving the impurity model" return None # save data from temporary file h5solver = h5["SolverData"] it = tmph5["L"][0] MEASURE_freq = True if "Gw" in tmph5 else False for s in tmph5["Observables"]: new_group_str = "Observables/%d/%s" % (it, s) for k in tmph5["Observables/%s" % s]: v = tmph5["Observables/%s/%s" % (s, k)] try: h5solver.create_dataset(new_group_str + "/" + k, v.shape, dtype=v.dtype, data=v) except: h5solver[new_group_str + "/" + k][:] = v Gmat = zeros((SPINS, int(parms["N_MAX_FREQ"]), NCOR), dtype=complex) Smat = zeros((SPINS, int(parms["N_MAX_FREQ"]), NCOR), dtype=complex) Ntau = max(int(parms["N_TAU"]) / 20, 400) + 1 Htau = tmph5["Hybtau"][:, ::20, :] # the updated density: for DMFT bands, get from Gtau, for inert bands, get from Gavg of previous iteration nf = h5["log_density"][0 if int(val_def(parms, "FIXED_HARTREE", 0)) > 0 else it - 1, 4:].reshape(-1, NCOR + 1) if len(nf) == 1: nf = r_[nf, nf] nf = nf[:, :NCOR] nf[:, dmft_id] = -assign(tmph5["Gtau"][-1, :], N_LAYERS) # get raw Gmat and Smat for f in range(size(tmph5["Gtau"], 1)): g = cppext.FT_tau2mat(tmph5["Gtau"][:, f].copy(), float(parms["BETA"]), int(parms["N_MAX_FREQ"])) try: tmp = c_[tmp, g] except: tmp = g.copy() Gmat[:, :, dmft_id] = assign(tmp, N_LAYERS) Smat[:, :, dmft_id] = aWeiss[:, :, dmft_id] - 1 / Gmat[:, :, dmft_id] if MEASURE_freq: nfreq = size(tmph5["Gw"][:], 0) Gmat[:, :nfreq, dmft_id] = assign(tmph5["Gw"], N_LAYERS) Stmp = assign(tmph5["Sw"], 
N_LAYERS) # adjust self energy measured using improved estimator # with contribution from inertial d-bands for L in range(N_LAYERS): SE_inert = get_inert_band_HF(parms, nf[:, L::N_LAYERS]) Stmp[0, :, L::N_LAYERS] += SE_inert[0] Stmp[1, :, L::N_LAYERS] += SE_inert[1] Smat[:, :nfreq, dmft_id] = Stmp # symmetrize orbital and spin if necessary paraorb = [int(s) for s in val_def(parms, "PARAORBITAL", "").split()] if len(paraorb) == 1: if paraorb[0] > 0: if parms["DTYPE"] == "3bands": paraorb = [[0, 1, 2]] # t2g only HARD CODE else: paraorb = [[0, 3], [1, 2, 4]] # t2g and eg HARD CODE else: paraorb = [] if len(paraorb) > 0: if type(paraorb[0]) != list: paraorb = [paraorb] print "Symmetrize over orbital ", paraorb for L in range(N_LAYERS): for s in range(SPINS): for sym_bands in paraorb: gm = zeros(size(Gmat, 1), dtype=complex) sm = zeros(size(Smat, 1), dtype=complex) nf_tmp = 0.0 for f in sym_bands: gm += Gmat[s, :, L + f * N_LAYERS] sm += Smat[s, :, L + f * N_LAYERS] nf_tmp += nf[s, L + f * N_LAYERS] for f in sym_bands: Gmat[s, :, L + f * N_LAYERS] = gm / float(len(sym_bands)) Smat[s, :, L + f * N_LAYERS] = sm / float(len(sym_bands)) nf[s, L + f * N_LAYERS] = nf_tmp / float(len(sym_bands)) if int(parms["SPINS"]) == 1: print "Symmetrize over spins" Gmat = array([mean(Gmat, 0)]) Smat = array([mean(Smat, 0)]) nf = array([mean(nf, 0)]) # smooth Gmat and Smat SPINS = int(parms["SPINS"]) Smat = smooth_selfenergy(it, h5, Smat, nf) NCutoff = int(parms["N_CUTOFF"]) Gmat[:, NCutoff:, :] = 1.0 / (aWeiss[:SPINS, NCutoff:, :] - Smat[:, NCutoff:, :]) # calculate Gtau from Gmat (after symmtrization) Gtau = zeros((SPINS, Ntau, NCOR), dtype=float) S0 = zeros((SPINS, NCOR)) for L in range(N_LAYERS): S0[:, L::N_LAYERS] = get_asymp_selfenergy(parms, nf[:, L::N_LAYERS])[:, 0, :] for s in range(SPINS): for f in range(NCOR): if f not in dmft_id: Smat[s, :, f] = S0[s, f] Gmat[s, :, f] = 1.0 / (aWeiss[s, :, f] - Smat[s, :, f]) Gtau[s, :, f] = cppext.IFT_mat2tau(Gmat[s, :, f].copy(), Ntau, 
float(parms["BETA"]), 1.0, 0.0) Gtau[:, 0, :] = -(1.0 - nf) Gtau[:, -1, :] = -nf # saving data dT = 5 Nb2 = size(tmph5["Gtau"], 0) / 2 Gb2 = array([mean(tmph5["Gtau"][Nb2 - dT : Nb2 + dT, f], 0) for f in range(size(tmph5["Gtau"], 1))]) log_data(h5solver, "log_Gbeta2", it, Gb2.flatten(), data_type=float) log_data(h5solver, "log_nsolve", it, -tmph5["Gtau"][-1, :].flatten(), data_type=float) log_data(h5solver, "hyb_asym_coeffs", it, tmph5["hyb_asym_coeffs"][:].flatten(), data_type=float) save_data(h5solver, it, ("Gtau", "Hybtau", "Hybmat"), (Gtau, Htau, tmph5["Hybmat"][:])) tmph5.close() del tmph5 os.system("rm %s" % tmph5filename) return Gmat, Smat
Ntot = sum( [ N('%s%d'%(s,f),0) for s in spins for f in range(NCOR) ]); Sz = sum( [ N('%s%d'%(spins[0],f),0) - N('%s%d'%(spins[1],f),0) for f in range(NCOR) ]); Quantum_Numbers = { 'Ntot' : Ntot, 'Sztot' : Sz }; for f in range(NCOR): Quantum_Numbers['Sz2_%d'%f] = N('%s%d'%(spins[0],f),0) + N('%s%d'%(spins[1],f),0) - 2*N('%s%d'%(spins[0],f),0)*N('%s%d'%(spins[1],f),0) else: Quantum_Numbers = {}; for sp in spins: for f in range(NCOR): Quantum_Numbers['N%s%d'%(sp,f)] = N('%s%d'%(sp,f),0); solver_parms['quantum_numbers'] = Quantum_Numbers; solver_parms['use_segment_picture'] = int(parms['SPINFLIP']) == 0; solver_parms['H_local'] = H_Local; # create a solver object solver = Solver(beta = BETA, gf_struct = GFstruct, n_w = int(val_def(parms, 'N_MATSUBARA', len(hyb_mat)))); # Legendre or Time accumulation accumulation = val_def(parms, 'ACCUMULATION', 'time'); if accumulation not in ['time', 'legendre']: exit('ACCUMULATION should be either "time" or "legendre"'); if accumulation == 'time': solver_parms['time_accumulation'] = True; solver_parms['legendre_accumulation'] = False; solver_parms['fit_start'] = len(hyb_mat)-10; solver_parms['fit_stop'] = len(hyb_mat)-1; # I don't want to use the fitTails() elif accumulation == 'legendre': solver_parms['legendre_accumulation'] = True; solver_parms['n_legendre'] = int(val_def(parms, 'N_LEGENDRE', 50));