def shapefun_from_scoef(self, scoefpath, shapefun_path, atom2shapes, shapefun_new):
    """
    Read shapefun and create impurity shapefun using scoef info and shapes array

    :param scoefpath: absolute path to scoef file
    :param shapefun_path: absolute path to input shapefun file
    :param atom2shapes: shapes array for mapping between atom index and shapefunction index
    :param shapefun_new: absolute path to output shapefun file to which the
        new shapefunction will be written
    """
    index1, index2, data = self._read_input(shapefun_path)

    # read the scoef file once and close it properly
    # (original opened it twice via open_general(...).readlines() and leaked both handles)
    with open_general(scoefpath) as f:
        scoef_lines = f.readlines()
    natomtemp = int(scoef_lines[0])
    filedata = scoef_lines[1:natomtemp + 1]

    # map each impurity-cluster atom (4th column of scoef, 1-based) to its
    # shapefunction index (0-based)
    order = [
        atom2shapes[int(line.split()[3]) - 1] - 1
        for line in filedata
        if len(line.split()) > 1
    ]

    # collect the shapefunction data blocks in cluster order
    datanew = []
    for iatom in range(len(order)):
        for iline in range(index1[order[iatom]], index2[order[iatom]] + 1):
            datanew.append(data[iline])

    # prepend header: number of shapes and scaling factor
    datanew = [' %i\n' % (len(order)), ' 1.000000000000E+00\n'] + datanew

    with open_general(shapefun_new, 'w') as f:
        f.writelines(datanew)
def test_open_general(self):
    """Check that open_general accepts both a path and an already opened handle."""
    path = '../tests/files/kkr/kkr_run_slab_nosoc/out_kkr'
    handle = open_general(path)
    nlines_from_path = len(handle.readlines())
    # passing an existing handle must rewind and return an equivalent handle
    handle = open_general(handle)
    nlines_from_handle = len(handle.readlines())
    assert nlines_from_path == nlines_from_handle
    assert nlines_from_handle > 0
def kick_out_corestates(potfile, potfile_out, emin):
    """
    Read potential file and kick out all core states that lie higher than emin.
    If no core state lies higher than emin then the output potential will be the same as the input potential

    :param potfile: input potential
    :param potfile_out: output potential where some core states are kicked out
    :param emin: minimal energy above which all core states are kicked out from potential
    :returns: number of lines that have been deleted
    """
    from masci_tools.io.common_functions import get_corestates_from_potential
    from numpy import where, array

    # read core states
    nstates, energies, lmoments = get_corestates_from_potential(potfile)
    with open_general(potfile) as f:
        # read potential file
        txt = f.readlines()

    #get start of each potential part
    istarts = [
        iline for iline in range(len(txt)) if 'POTENTIAL' in txt[iline]
    ]
    all_lines = list(range(len(txt)))  # index array

    # change list of core states
    for ipot in range(len(nstates)):
        if nstates[ipot] > 0:
            # indices of core states of this potential that lie above emin
            m = where(energies[ipot] > emin)
            if len(m[0]) > 0:
                istart = istarts[ipot]
                # change number of core states in potential
                # (istart+6 holds the core-state count line — offset fixed by the
                # potential file format; presumably constant, confirmed by the
                # commented-out debug print below)
                #print(txt[istart+6])
                txt[istart + 6] = '%i 1\n' % (nstates[ipot] - len(m[0]))
                # now remove energy line accordingly
                # (walk backwards so earlier pops do not shift later indices)
                for ie_out in m[0][::-1]:
                    m_out = where(
                        array(all_lines) == istart + 6 + ie_out + 1)[0][0]
                    e_out = all_lines.pop(m_out)

    # find number of deleted lines
    num_deleted = len(txt) - len(all_lines)

    if num_deleted > 0:
        # write output potential, keeping only the surviving line indices
        with open_general(potfile_out, u'w') as f2:
            txt_new = []
            for iline in all_lines:
                txt_new.append(str(txt[iline]))
            f2.writelines(txt_new)

    # return number of lines that were deleted
    return num_deleted
def _get_orbmom_per_atom(self, file, natom):
    """
    Extract orbital moment for all atoms (orbmom_at: all atoms in last
    iteration, orbmom_at_all: all atoms in all iterations). For each atom
    there are six values: first -> x-component real part, second ->
    x-component imaginary part, third -> y-component real part, ... sixth ->
    z-component imaginary part.

    :param file: file that is parsed
    :param natom: number of atoms in impurity cluster
    :returns: orbmom_at (list), orbital moments for all atoms
    """
    import numpy as np

    handle = open_general(file)
    lines = handle.readlines()

    # last natom lines hold the final iteration; columns 1, 3, 5 are the
    # real parts of x, y, z (the full complex numbers are written out)
    first = len(lines) - natom
    orbmom_at = np.array(
        [[cols[1], cols[3], cols[5]]
         for cols in (line.split() for line in lines[first:])],
        dtype=float)

    # all iterations: every line after the header line
    orbmom_at_all = np.array(
        [[cols[1], cols[3], cols[5]]
         for cols in (line.split() for line in lines[1:])],
        dtype=float)

    return orbmom_at, orbmom_at_all
def _extract_timings(self, outfile):
    """
    Extract timings for the different parts in the KKRimp code

    :param outfile: timing file of the KKRimp run
    :returns: res (dict) timings in seconds, averaged over iterations
    """
    handle = open_general(outfile)
    tmptxt = handle.readlines()
    handle.close()

    search_keys = [
        'time until scf starts', 'vpot->tmat', 'gref->gmat',
        'gonsite->density', 'energyloop', 'Iteration number',
        'Total running time'
    ]

    res = {}
    for key in search_keys:
        # collect every occurrence of the key, popping matched lines so
        # search_string advances through the file
        values = []
        while True:
            iline = search_string(key, tmptxt)
            if iline < 0:
                break
            values.append(float(tmptxt.pop(iline).split()[-1]))
        if values:
            res[key] = values

    # average the per-iteration timings over the number of iterations
    niter = len(res.get(search_keys[-2], []))
    if niter > 0:
        for key in search_keys[1:6]:
            if key in res:
                res[key] = sum(res[key]) / niter
        # these two occur once per run, keep the scalar value
        for key in (search_keys[0], search_keys[-1]):
            if key in res:
                res[key] = res[key][0]
    return res
def get_cls_info(outfile):
    """
    Parse the 'CLSGEN_TB: Atom' lines of the given output file.

    :param outfile: output file of the KKR run containing 'CLSGEN_TB: Atom' lines
    :returns: (Ncls, Natom, results) where Ncls is the number of distinct
        cluster indices found, Natom the number of matching atom lines, and
        results a list with the five parsed numbers of each matching line
        (columns 2, 4, 6, 8, 10 as int/int/float/int/int)
    """
    # context manager closes the file; the original additionally called
    # f.close() inside the with-block, which was redundant
    with open_general(outfile) as f:
        tmptxt = f.readlines()
    itmp = 0
    Ncls = 0
    Natom = 0
    cls_all = []
    results = []
    while itmp >= 0:
        itmp = search_string('CLSGEN_TB: Atom', tmptxt)
        if itmp >= 0:
            tmpstr = tmptxt.pop(itmp).split()
            tmp = [
                int(tmpstr[2]),
                int(tmpstr[4]),
                float(tmpstr[6]),
                int(tmpstr[8]),
                int(tmpstr[10])
            ]
            results.append(tmp)
            # column 8 is the cluster index; count each distinct cluster once
            if int(tmpstr[8]) not in cls_all:
                Ncls += 1
                cls_all.append(int(tmpstr[8]))
            Natom += 1
    return Ncls, Natom, results
def get_lattice_vectors(outfile_0init):
    """
    read direct and reciprocal lattice vectors in internal units
    (useful for qdos generation)

    :param outfile_0init: output.0.txt file of the KKR run
    :returns: (vecs, rvecs) — direct and reciprocal lattice vectors; for 2D
        systems (no a_3/b_3 line) the first two vectors are truncated to
        their in-plane components
    """
    f = open_general(outfile_0init)
    tmptxt = f.readlines()
    f.close()
    vecs, rvecs = [], []
    tmpvecs = []
    for search_txt in ['a_1: ', 'a_2: ', 'a_3: ', 'b_1: ', 'b_2: ', 'b_3: ']:
        itmp = search_string(search_txt, tmptxt)
        if itmp >= 0:
            tmpvec = tmptxt[itmp].split(':')[1].split()
            # bugfix: the z-component was previously read from tmpvec[1]
            # (duplicating the y-component) instead of tmpvec[2]
            tmpvecs.append(
                [float(tmpvec[0]), float(tmpvec[1]), float(tmpvec[2])])
        if search_txt in ['a_3: ', 'b_3: '] and itmp < 0:
            # reset vecs for 2D case: keep only in-plane components
            tmpvecs[0] = tmpvecs[0][:2]
            tmpvecs[1] = tmpvecs[1][:2]
        if search_txt == 'a_3: ':
            vecs = tmpvecs
            tmpvecs = []
        elif search_txt == 'b_3: ':
            rvecs = tmpvecs
    return vecs, rvecs
def get_econt_info(outfile_0init):
    """
    Extract the energy contour settings from output.0.txt

    :param outfile_0init: output.0.txt file of the KKR run
    :returns: (emin, tempr, Nepts, Npol, N1, N2, N3)
    """
    handle = open_general(outfile_0init)
    tmptxt = handle.readlines()
    handle.close()

    iline = search_string('E min', tmptxt)
    emin = float(tmptxt[iline].split('min')[1].split('=')[1].split()[0])

    iline = search_string('Temperature', tmptxt)
    tempr = float(
        tmptxt[iline].split('Temperature')[1].split('=')[1].split()[0])

    iline = search_string('Number of energy points', tmptxt)
    Nepts = int(tmptxt[iline].split(':')[1].split()[0])

    if search_string('Density-of-States calculation', tmptxt) == -1:
        # scf contour: read the number of Matsubara poles and the three panels
        iline = search_string('poles =', tmptxt)
        Npol = int(tmptxt[iline].split('=')[1].split()[0])
        iline = search_string('contour:', tmptxt)
        parts = tmptxt[iline].replace(',', '').split(':')[1].split()
        N1, N2, N3 = int(parts[2]), int(parts[5]), int(parts[8])
    else:
        # DOS calculation: all energy points sit on the second panel
        Npol, N1, N2, N3 = 0, 0, Nepts, 0

    return emin, tempr, Nepts, Npol, N1, N2, N3
def extract_timings(outfile):
    """
    Extract timing information from the KKR timing file.

    :param outfile: timing output file
    :returns: dict mapping each timing key to the value found in the first pass
    """
    handle = open_general(outfile)
    tmptxt = handle.readlines()
    handle.close()

    search_keys = [
        'main0',
        'main1a - tbref',
        'main1a ',  # two spaces to differentiate from following key
        'main1b - calctref13',
        'main1b ',  # two spaces!
        'main1c - serial part',
        'main1c ',  # two spaces!
        'main2',
        'Time in Iteration'
    ]

    res = []
    iline = 0
    while iline >= 0:
        found = []
        for key in search_keys:
            iline = search_string(key, tmptxt)
            if iline >= 0:
                found.append([key, float(tmptxt.pop(iline).split()[-1])])
        if found:
            res.append(found)

    # keep only the first set of timings that was found
    return dict(res[0])
def get_spinmom_per_atom(outfile, natom, nonco_out_file=None):
    """
    Extract spin moment information from outfile and nonco_angles_out (if given)
    """
    handle = open_general(outfile)
    tmptxt = handle.readlines()
    handle.close()

    result = []
    iline = 0
    while iline >= 0:
        iline = search_string('m_spin', tmptxt)
        if iline >= 0:
            # drop the 'm_spin' header line, then read one value per atom
            tmptxt.pop(iline)
            moments = []
            for _ in range(natom):
                moments.append(float(tmptxt.pop(iline).split()[3]))
            result.append(moments)

    # if the file is there, i.e. NEWSOSOL is used, then extract also the
    # direction of the spins (angles theta and phi)
    if nonco_out_file is not None and result != []:
        angles = loadtxt(nonco_out_file)
        if len(shape(angles)) == 1:
            angles = array([angles])
        vec = angles_to_vec(result[-1], angles[:, 0], angles[:, 1])
    else:
        vec, angles = [], []

    return array(result), vec, angles
def get_symmetries(outfile_0init):
    """
    Extract the lattice-symmetry information from output.0.txt

    :param outfile_0init: output.0.txt file of the KKR run
    :returns: (nsym, nsym_used, desc) — total number of symmetries found,
        number of symmetries used, and a dict keyed by the symmetry label
        with 'has_inversion', 'is_unitary' and 'euler_angles' entries
    """
    f = open_general(outfile_0init)
    tmptxt = f.readlines()
    f.close()
    try:
        itmp = search_string('symmetries found for this lattice:', tmptxt)
        nsym = int(tmptxt[itmp].split(':')[1].split()[0])
    except IndexError:
        # fallback: search again below the FINDGROUP header when the first
        # pattern does not yield a parseable line
        itmp = search_string('< FINDGROUP > : Finding symmetry operations',
                             tmptxt)
        tmptxt2 = tmptxt[itmp:]
        itmp = search_string('found for this lattice:', tmptxt2)
        nsym = int(tmptxt2[itmp].split(':')[1].split()[0])
    itmp = search_string('symmetries will be used', tmptxt)
    nsym_used = int(tmptxt[itmp].split()[3])
    itmp = search_string('<SYMTAUMAT>', tmptxt)
    tmpdict = {}
    # symmetry table starts 5 lines below the <SYMTAUMAT> marker, one line
    # per used symmetry operation
    for isym in range(nsym_used):
        tmpval = tmptxt[itmp + 5 + isym].replace('0-', '0 -').replace(
            '1-', '1 -').split()  # bugfix for -120 degree euler angle
        desc = tmpval[1]
        inversion = int(tmpval[2])
        euler = [float(tmpval[3]), float(tmpval[4]), float(tmpval[5])]
        # T/F flag converted to 1/0
        unitary = int(tmpval[6].replace('T', '1').replace('F', '0'))
        tmpdict[desc] = {
            'has_inversion': inversion,
            'is_unitary': unitary,
            'euler_angles': euler
        }
    desc = tmpdict
    return nsym, nsym_used, desc
def _get_orbmom_per_atom(self, file, natom):
    """
    Extract orbital moment for all atoms (orbmom_at: all atoms in last
    iteration, orbmom_at_all: all atoms in all iterations). For each atom
    there are six values: first -> x-component real part, second ->
    x-component imaginary part, third -> y-component real part, ... sixth ->
    z-component imaginary part.

    :param file: file that is parsed
    :param natom: number of atoms in impurity cluster
    :returns: orbmom_at (list), orbital moments for all atoms
    """
    import numpy as np
    f = open_general(file)
    lines = f.readlines()
    startline = len(lines) - natom
    # build both arrays in a single pass instead of repeated np.append calls
    # (np.append copies the whole array every call, i.e. quadratic cost);
    # NOTE: entries keep numpy's string dtype, exactly as before
    orbmom_at = np.array(
        [lines[startline + i].split() for i in range(natom)])
    orbmom_at_all = np.array(
        [lines[j].split() for j in range(1, len(lines))])
    return orbmom_at, orbmom_at_all
def _get_spinmom_per_atom(self, file, natom):
    """
    Extract spin moment for all atoms

    :param file: file that is parsed
    :param natom: number of atoms in impurity cluster
    :returns: spinmom_at (array of spin moments for all atoms and the last
        iteration), spinmom_at_all (array of spin moments for all atoms and
        iterations), spinmom_at_tot (total spinmoment for the last iteration)
    """
    import numpy as np
    from math import sqrt
    f = open_general(file)
    lines = f.readlines()
    startline = len(lines) - natom
    # build both arrays in a single pass instead of repeated np.append calls
    # (np.append copies the whole array every call, i.e. quadratic cost);
    # NOTE: entries keep numpy's string dtype, exactly as before
    spinmom_at = np.array(
        [lines[startline + i].split() for i in range(natom)])
    spinmom_at_all = np.array(
        [lines[j].split() for j in range(1, len(lines))])
    # total moment of the last iteration: sum of the moduli of the atomic moments
    spinmom_at_tot = 0
    for i in range(natom):
        spinmom_at_tot += sqrt(
            float(spinmom_at[i][0])**2 + float(spinmom_at[i][1])**2 +
            float(spinmom_at[i][2])**2)
    return spinmom_at, spinmom_at_all, spinmom_at_tot
def _get_econt_info(self, out_log):
    """
    extract energy contour information from out_log file

    :param out_log: file that is parsed
    :returns: econt (dict), dictionary containing the energy contour info
    :note: econt contains the following keys
        * 'emin', bottom of energy contour
        * 'Nepts', number of points in energy contour
        * 'epts', list of complex valued energy points
        * 'weights', list of complex valued weights for energy integration
    """
    from masci_tools.io.common_functions import search_string
    from numpy import array

    handle = open_general(out_log)
    tmptxt = handle.readlines()
    handle.close()

    econt = {}
    iline = search_string('[read_energy] number of energy points', tmptxt)
    if iline >= 0:
        econt['Nepts'] = int(tmptxt.pop(iline).split()[-1])

    iline = search_string('energies and weights are:', tmptxt)
    if iline >= 0:
        # table of points starts 4 lines below the marker; each row holds
        # Re(E), Im(E), Re(w), Im(w) after the leading index column
        points = []
        for ie in range(econt['Nepts']):
            cols = tmptxt[iline + 4 + ie].split()[1:]
            points.append(
                [float(cols[0]), float(cols[1]), float(cols[2]), float(cols[3])])
        points = array(points)
        econt['epts'] = points[:, :2]
        econt['weights'] = points[:, 2:]
        econt['emin'] = points[0, 0]

    return econt
def write_scoef(x_res, path):
    """
    Sort the data from find_neighbors by distance to the selected atom and
    write it, correctly formatted, into the 'scoef' file. The first line of
    the file contains the total number of atoms in the list.

    :param x_res: array of atoms within the cutoff radius obtained by
        find_neighbors (unsorted)
    :output: scoef file with the total number of atoms in the first line,
        followed by the formatted positions, indices, charges and distances.
    """
    # sort rows by the distance column (last column)
    x_res = x_res[x_res[:, -1].argsort()]

    with open_general(path, 'w') as outfile:
        outfile.write("{0:4d}\n".format(len(x_res)))
        for row in x_res:
            fields = [
                "{0:26.19e}".format(row[0]),
                "{0:26.19e}".format(row[1]),
                "{0:26.19e}".format(row[2]),
                "{0:4d}".format(int(row[3])),
                "{0:4.1f}".format(row[4]),
                "{0:26.19e}".format(row[5]),
            ]
            outfile.write(" ".join(fields) + "\n")
def get_alat(inpfile):
    """Read the ALATBASIS value from the input file and return it as float."""
    with open_general(inpfile) as f:  # make sure the file is properly closed
        txt = f.readlines()
    iline = search_string('ALATBASIS', txt)
    return float(txt[iline].split('ALATBASIS')[1].split('=')[1].split()[0])
def cleanup_outfiles(self, fileidentifier, keyslist):
    """open file and remove unneeded output"""
    with open_general(fileidentifier) as tfile:
        txt = tfile.readlines()

    # collect the line numbers on which any of the keys occurs
    matches = []
    for iline, line in enumerate(txt):
        for key in keyslist:  # go through all keys
            if key in line:  # remember line id if key has been found
                matches.append(iline)

    # rewrite file deleting the middle part
    if len(matches) > 1:  # cut only if more than one iteration was found
        txt = (txt[:matches[0]] +
               ['# ... [removed output except for last iteration] ...\n'] +
               txt[matches[-1]:])
        with open_general(fileidentifier, 'w') as tfilenew:
            tfilenew.writelines(txt)
def get_alatinfo(outfile_0init):
    """Read the lattice constants alat and 2*pi/alat from output.0.txt."""
    handle = open_general(outfile_0init)
    tmptxt = handle.readlines()
    handle.close()
    iline = search_string('Lattice constants :', tmptxt)
    # both values sit on the same line: '... : alat = <v1> 2*pi/alat = <v2>'
    after_colon = tmptxt[iline].split(':')[1]
    alat = float(after_colon.split('=')[1].split()[0])
    twopialat = float(after_colon.split('=')[2].split()[0])
    return alat, twopialat
def startpot_jellium(outfile):
    """Return True if the output file reports jellium starting potentials."""
    with open_general(outfile) as f:  # make sure the file is properly closed
        tmptxt = f.readlines()
    return search_string('JELLSTART POTENTIALS', tmptxt) != -1
def get_shape_array(outfile, atominfo):
    """
    Read NAEZ/NATYP from outfile and the per-atom shapefunction indices from
    the atominfo file.

    :param outfile: file containing 'NAEZ=' and/or 'NATYP=' entries
    :param atominfo: file containing the '<SHAPE>' block
    :returns: (natyp, naez, ishape) where ishape lists the shapefunction
        index for atoms 1..natyp
    :raises ValueError: if neither NAEZ nor NATYP is found in outfile
    """
    f = open_general(outfile)
    with f:  # make sure the file is properly closed
        txt = f.readlines()
    #naez/natyp number of items either one number (=ishape without cpa or two =[iatom, ishape] with CPA)
    # read in naez and/or natyp and then find ishape array (1..natyp[=naez without CPA])
    itmp = search_string('NAEZ= ', txt)
    if itmp >= 0:
        tmp = txt[itmp]
        ipos = tmp.find('NAEZ=')
        naez = int(tmp[ipos + 5:].split()[0])
    else:
        naez = -1
    itmp = search_string('NATYP= ', txt)
    if itmp >= 0:
        tmp = txt[itmp]
        ipos = tmp.find('NATYP=')
        natyp = int(tmp[ipos + 6:].split()[0])
    else:
        natyp = -1
    # consistency check: fall back to the one value that was found
    if naez == -1 and natyp > 0:
        naez = natyp
    elif natyp == -1 and naez > 0:
        natyp = naez
    elif natyp == -1 and naez == -1:
        raise ValueError('Neither NAEZ nor NATYP found in %s' % outfile)
    # read shape index from atominfo file
    f = open_general(atominfo)
    with f:  # make sure the file is properly closed
        tmptxt = f.readlines()
    # shape entries start on the line after the '<SHAPE>' marker
    itmp = search_string('<SHAPE>', tmptxt) + 1
    ishape = []
    for iatom in range(natyp):
        txt = tmptxt[itmp + iatom]
        if natyp > naez:  #CPA option: lines hold [iatom, ishape]
            ishape.append(int(txt.split()[1]))
        else:
            ishape.append(int(txt.split()[0]))
    return natyp, naez, ishape
def get_natom(outfile_0init):
    """
    extract NATYP value from output.0.txt
    """
    with open_general(outfile_0init) as f:
        tmptxt = f.readlines()
    iline = search_string('NATYP', tmptxt)
    # the value sits on the line following the 'NATYP' header
    return int(tmptxt[iline + 1].split()[0])
def get_nspin(outfile_0init):
    """
    extract NSPIN value from output.0.txt
    """
    with open_general(outfile_0init) as f:
        tmptxt = f.readlines()
    iline = search_string('NSPIN', tmptxt)
    # the value sits on the line following the 'NSPIN' header
    return int(tmptxt[iline + 1].split()[0])
def get_valence_min(outfile='out_voronoi'):
    """Construct minimum of energy contour (between valence band bottom and core states)"""
    with open_general(outfile) as f:  # make sure the file is properly closed
        txt = f.readlines()
    marker = 'All other states are above'
    values = [
        float(line.split(':')[1].split()[0]) for line in txt if marker in line
    ]
    return np.array(values)
def _get_EF_potfile(self, potfile):
    """
    Extract EF value from potential file

    :param potfile: file that is parsed
    :returns: EF (float), value of the Fermi energy in Ry
    """
    handle = open_general(potfile)
    tmptxt = handle.readlines()
    handle.close()
    # EF is the second entry on the fourth line of the potential file
    return float(tmptxt[3].split()[1])
def get_scfinfo(outfile_0init, outfile_000, outfile):
    """
    Collect scf-cycle information from the three output files.

    :param outfile_0init: output.0.txt file (mixing settings)
    :param outfile_000: output.000.txt file (iteration counters)
    :param outfile: main output file (convergence messages)
    :returns: (niter, nitermax, converged, nmax_reached, mixinfo) with
        mixinfo = [imix, strmix, qbound, fcm, idtbry, brymix]
    """
    with open_general(outfile_000) as f:
        tmptxt = f.readlines()
    fields = tmptxt[search_string('ITERATION :',
                                  tmptxt)].split(':')[1].split()
    niter = int(fields[0])
    nitermax = int(fields[3])

    with open_general(outfile) as f:
        tmptxt = f.readlines()
    converged = search_string('SCF ITERATION CONVERGED', tmptxt) >= 0
    nmax_reached = search_string('NUMBER OF SCF STEPS EXHAUSTED', tmptxt) >= 0

    with open_general(outfile_0init) as f:
        tmptxt = f.readlines()
    # mixing parameters: values sit one line below the header, brymix four below
    iline = search_string('STRMIX FCM QBOUND', tmptxt)
    cols = tmptxt[iline + 1].split()
    strmix = float(cols[0])
    fcm = float(cols[1])
    qbound = float(cols[2])
    brymix = float(tmptxt[iline + 4].split()[0])
    iline = search_string('IMIX IGF ICC', tmptxt)
    imix = int(tmptxt[iline + 1].split()[0])
    idtbry = int(tmptxt[iline + 4].split()[0])

    mixinfo = [imix, strmix, qbound, fcm, idtbry, brymix]
    return niter, nitermax, converged, nmax_reached, mixinfo
def use_newsosol(outfile_0init):
    """
    extract NEWSOSOL info from output.0.txt
    """
    with open_general(outfile_0init) as f:
        tmptxt = f.readlines()
    return search_string('NEWSOSOL', tmptxt) >= 0
def _get_nspin(self, file):
    """
    Extract nspin from file

    :param file: file that is parsed
    :returns: 1 if calculation is paramagnetic, 2 otherwise
    """
    handle = open_general(file)
    tmptxt = handle.readlines()
    handle.close()
    iline = search_string('NSPIN', tmptxt)
    # the value is the last entry on the matching line
    return int(tmptxt[iline].split()[-1])
def _get_natom(self, file):
    """
    Extract number of atoms in impurity cluster

    :param file: file that is parsed to find number of atoms
    :returns: natom (int), number of atoms in impurity cluster
    """
    handle = open_general(file)
    tmptxt = handle.readlines()
    handle.close()
    iline = search_string('NATOM is', tmptxt)
    # the value is the last entry on the matching line
    return int(tmptxt[iline].split()[-1])
def get_fpradius(naez, atominfo):
    """Read the <FPRADIUS> value (last column) for each of the naez atoms."""
    with open_general(atominfo) as f:  # make sure the file is properly closed
        txt = f.readlines()
    # atom lines start directly below the '<FPRADIUS>' marker, e.g.:
    #ZAT LMXC KFG <CLS> <REFPOT> <NTC> FAC <IRNS> <RMTREF> <FPRADIUS>
    # 0.00 1 3 3 0 0 1 1 1 1. 199 2.3166000 0.4696902
    istart = search_string('<FPRADIUS>', txt) + 1
    return [float(txt[istart + iatom].split()[-1]) for iatom in range(naez)]
def get_radial_meshpoints(potfile):
    """Extract the radial meshpoint value for every potential block in potfile."""
    with open_general(potfile) as f:  # make sure the file is properly closed
        txt = f.readlines()
    result = []
    while True:
        iline = search_string('exc:', txt)
        if iline < 0:
            break
        txt.pop(iline)  # remove header line
        # meshpoint value sits three lines below the (removed) header
        result.append(float(txt.pop(iline + 3)))
    return result