def mergejobfiles(jobfiles, outputfile, checkids=False, ignorejobsidentifier=False):
    if type(jobfiles) == str:
        jobfiles = [jobfiles]
    root = lxml.Element("jobs")
    for jobfile in jobfiles:
        numberofjobs = 0
        ignoredjobs = 0
        smallroot = XmlParser(jobfile)
        for entry in smallroot.iter('job'):
            if ignorejobsidentifier != False:
                status = entry.find("status").text
                if status in ignorejobsidentifier:
                    ignoredjobs += 1
                    continue
            numberofjobs += 1
            if checkids:
                jobid = int(entry.find("id").text)
                # use a separate loop variable so 'entry' is not overwritten
                for existing in root.iter('job'):
                    jid = int(existing.find("id").text)
                    if jobid == jid:
                        print "Job {} from {} has the same ID as a job parsed before. Exiting..".format(jobid, jobfile)
                        sys.exit()
            root.append(entry)
        print "Found {} jobs in file {}".format(numberofjobs, jobfile)
        if ignorejobsidentifier != False:
            if type(ignorejobsidentifier) == str:
                ignorejobsidentifier = [ignorejobsidentifier]
            print "which are not of type:{}".format(" ".join(ignorejobsidentifier))
            print "Ignored jobs: {}".format(ignoredjobs)
    XmlWriter(root, outputfile)
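# Usage sketch (illustrative; the file names are hypothetical): merge two
# jobfiles into one, checking that no job id appears twice and skipping jobs
# that are already COMPLETE.
# mergejobfiles(["eqm.jobs.0.xml", "eqm.jobs.1.xml"], "eqm.jobs.xml",
#               checkids=True, ignorejobsidentifier=["COMPLETE"])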
def readexcitonxml_egwbse(filename):
    results = []
    root = XmlParser(filename)
    for job in root.iter('job'):
        output = job.find("output")
        segment = output.find("segment")
        gwbse = segment.find("GWBSE")
        mol = readexcitonxml_molecule(gwbse)
        mol.setId(int(segment.get("id")))
        mol.setName(segment.get("type"))
        results.append(mol)
    return results
def makefolder(self):
    # print self.path
    copyfromtemplate(self.foldername)
    root = XmlParser("{}/exciton.xml".format(self.foldername))
    exciton = root.find("exciton")
    exciton.find("tasks").text = "input"
    XmlWriter(root, "{}/exciton.xml".format(self.foldername))
    with cd(self.foldername):
        sp.call("xtp_tools -e exciton -o exciton.xml > exciton.log", shell=True)
        self.modcomfile("system.com")
    exciton.find("tasks").text = "dft,parse,gwbse"
    XmlWriter(root, "{}/exciton.xml".format(self.foldername))
def makefolder(self):
    # print self.path
    copyfromtemplate(self.foldername)
    root = XmlParser("{}/exciton.xml".format(self.foldername))
    exciton = root.find("exciton")
    gwbseengine = exciton.find("gwbse_engine")
    gwbseengine.find("tasks").text = "input"
    XmlWriter(root, "{}/exciton.xml".format(self.foldername))
    with cd(self.foldername):
        sp.call("xtp_tools -e exciton -o exciton.xml > exciton.log", shell=True)
        self.modcomfile("system.com")
    gwbseengine.find("tasks").text = "dft,parse,gwbse"
    XmlWriter(root, "{}/exciton.xml".format(self.foldername))
def readexcitoncoulingclassical(filename):
    root = XmlParser(filename)
    results = []
    for pair in root:
        Coupling = pair[0]
        results.append(float(Coupling.get("jABstatic")))
    return results
def readexcitoncouplingxml(filename, states):
    root = XmlParser(filename)
    resultlist = []
    for pair in root:
        types = pair[0]
        couplings = []
        for state in states:
            results = None
            if state[0] == "s":
                results = types.find("singlets")
            elif state[0] == "t":
                results = types.find("triplets")
            else:
                print "state not known"
                continue
            number = int(state[1:])
            # print number
            for coupling in results:
                noA = int(coupling.get("excitonA"))
                noB = int(coupling.get("excitonB"))
                if noA + 1 == number and noB + 1 == number:
                    couplings.append(float(coupling.text))
                    break
        resultlist.append(couplings)
    return resultlist
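# Usage sketch (illustrative; the file name is hypothetical): entries of
# 'states' are strings such as "s1" or "t2", i.e. a singlet/triplet letter
# followed by a 1-based state index, which is compared against the
# excitonA/excitonB attributes plus one.
# couplings = readexcitoncouplingxml("excitoncoupling.xml", ["s1", "t1"])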
def splittjobfile(jobfile, jobfiles, ignorejobsidentifier=False):
    root = XmlParser(jobfile)
    numberofjobs = 0
    ignoredjobs = 0
    # iterate over a copy, because removing elements while iterating
    # root.iter() directly can skip entries
    for entry in list(root.iter('job')):
        if ignorejobsidentifier != False:
            status = entry.find("status").text
            if status in ignorejobsidentifier:
                root.remove(entry)
                ignoredjobs += 1
                continue
        numberofjobs += 1
    print "Found {} jobs in file {}".format(numberofjobs, jobfile)
    if ignorejobsidentifier != False:
        if type(ignorejobsidentifier) == str:
            ignorejobsidentifier = [ignorejobsidentifier]
        print "which are not of type:{}".format(" ".join(ignorejobsidentifier))
        print "Ignored jobs: {}".format(ignoredjobs)
    numberoffiles = len(jobfiles)
    jobsperfile = numberofjobs / numberoffiles
    remaining = numberofjobs % numberoffiles
    jobsperfilelist = []
    roots = []
    for i in range(numberoffiles):
        roots.append(lxml.Element("jobs"))
        if remaining > 0:
            jobsperfilelist.append(jobsperfile + 1)
            remaining -= 1
        else:
            jobsperfilelist.append(jobsperfile)
    i = 0
    j = 1
    # again iterate over a copy; appending to another tree moves the element
    for entry in list(root.iter('job')):
        if j > jobsperfilelist[i]:
            i += 1
            j = 1
        j += 1
        roots[i].append(entry)
    print "Split up jobfile into {} files".format(numberoffiles)
    print "with: {} jobs each".format(" ".join(map(str, jobsperfilelist)))
    for outputfile, root in zip(jobfiles, roots):
        XmlWriter(root, outputfile)
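# Usage sketch (hypothetical file names): split one jobfile into three chunks,
# dropping jobs that are already COMPLETE.
# splittjobfile("eqm.jobs.xml",
#               ["eqm.jobs.0.xml", "eqm.jobs.1.xml", "eqm.jobs.2.xml"],
#               ignorejobsidentifier=["COMPLETE"])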
def resetjobfile(jobfile, assigned=True, failed=True):
    root = XmlParser(jobfile)
    piece = ""
    if assigned and failed:
        piece = "ASSIGNED and FAILED"
    elif assigned:
        piece = "ASSIGNED"
    elif failed:
        piece = "FAILED"
    else:
        piece = "No Entry"
    print "Resetting for file {}: {} to AVAILABLE".format(jobfile, piece)
    for entry in root.iter('job'):
        status = entry.find("status").text
        if assigned and status == "ASSIGNED":
            entry.find("status").text = "AVAILABLE"
        elif failed and status == "FAILED":
            entry.find("status").text = "AVAILABLE"
    XmlWriter(root, jobfile)
def infojobfile(jobfile):
    complete = 0
    available = 0
    assigned = 0
    failed = 0
    root = XmlParser(jobfile)
    for entry in root.iter('job'):
        status = entry.find("status").text
        if status == "ASSIGNED":
            assigned += 1
        elif status == "AVAILABLE":
            available += 1
        elif status == "COMPLETE":
            complete += 1
        elif status == "FAILED":
            failed += 1
        else:
            jobid = entry.find("id").text
            print "WARNING: Job status {} for job id:{} in file {} not known".format(status, jobid, jobfile)
    total = complete + available + assigned + failed
    return total, complete, available, assigned, failed
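# Usage sketch (the file name is a hypothetical example): print a one-line
# status summary for a jobfile.
# total, complete, available, assigned, failed = infojobfile("eqm.jobs.xml")
# print "Total:{} Complete:{} Available:{} Assigned:{} Failed:{}".format(
#     total, complete, available, assigned, failed)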
def resetjobfile(jobfile, assigned=True, failed=True, complete=False):
    root = XmlParser(jobfile)
    piece = ""
    if assigned:
        piece += "ASSIGNED "
    if failed:
        piece += "FAILED "
    if complete:
        piece += "COMPLETE "
    if piece == "":
        piece = "No Entry"
    print "Resetting for file {}: {} to AVAILABLE".format(jobfile, piece)
    for entry in root.iter('job'):
        status = entry.find("status").text
        if assigned and status == "ASSIGNED":
            entry.find("status").text = "AVAILABLE"
        elif failed and status == "FAILED":
            entry.find("status").text = "AVAILABLE"
        elif complete and status == "COMPLETE":
            entry.find("status").text = "AVAILABLE"
    XmlWriter(root, jobfile)
def readcouplingxml(filename):
    root = XmlParser(filename)
    Je = []
    Jh = []
    for pair in root:
        homoA = int(pair.get("homoA"))
        homoB = int(pair.get("homoB"))
        for overlap in pair:
            orbA = int(overlap.get("orbA"))
            orbB = int(overlap.get("orbB"))
            if orbA == homoA and orbB == homoB:
                Je.append(float(overlap.text))
            elif orbA == homoA + 1 and orbB == homoB + 1:
                Jh.append(float(overlap.text))
    return [Je, Jh]
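# Usage sketch (hypothetical file name): returns two lists, the couplings at
# the (homoA, homoB) orbital pair and at (homoA+1, homoB+1), as parsed from the
# pair/overlap structure above.
# Je, Jh = readcouplingxml("couplings.xml")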
def readexcitonxml(filename):
    root = XmlParser(filename)
    return readexcitonxml_molecule(root)
required=True, help="Name of jobfile") parser.add_argument("--min", type=int, default=-14, help="Minimum log10(J2) to still count") args = parser.parse_args() if type(args.jobfiles) == str: args.jobfiles = [args.jobfiles] for i, jobfile in enumerate(args.jobfiles): job = [] toosmall = 0 print "Reading in {}".format(jobfile) root = XmlParser(jobfile) for entry in root.iter('job'): status = entry.find("status").text if status == "COMPLETE": coupling = entry.find("output")[0][0].get("jABstatic") j2 = float(coupling)**2 #if j2>10**args.min: job.append(j2) #else: #toosmall+=1 job = np.array(job) if i == 0: total = job else: total += job
parser.add_argument("--state",type=str,help="State to give tag to, only required for ewald") parser.add_argument("--format",type=str,required=True,help="Format string into which the id is placed e.g. Molecule_{}_n2s1.mps") parser.add_argument("--id",nargs="+",type=int,default=[1, 2],help="only rewrite first or second segment of pair,iexcitoncl") parser.add_argument("--compare", action='store_const', const=1, default=0,help="Only replace mps files where segtype can be found in formatstring") args=parser.parse_args() filetype= os.path.splitext(args.jobfile)[1][1:] if filetype=="jobs": root=XmlParser(args.jobfile) print "read in " for inputs in root.iterfind('.//input'): for i,segment in enumerate(inputs): if i+1 not in args.id: continue if args.compare: segtype=segment.get('type') if segtype not in args.format: continue segid=segment.get("id") mpsfile=os.path.join(args.path,(args.format).format(segid)) segment.set('mps_file',mpsfile) print "writing" with open(args.jobfile, 'w') as f: f.write(lxml.tostring(root, pretty_print=True))
nargs="+", type=int, default=[1, 2], help="only rewrite first or second segment of pair,iexcitoncl") parser.add_argument( "--compare", action='store_const', const=1, default=0, help="Only replace mps files where segtype can be found in formatstring") args = parser.parse_args() filetype = os.path.splitext(args.jobfile)[1][1:] if filetype == "jobs": root = XmlParser(args.jobfile) print "read in " for inputs in root.iterfind('.//input'): for i, segment in enumerate(inputs): if i + 1 not in args.id: continue if args.compare: segtype = segment.get('type') if segtype not in args.format: continue segid = segment.get("id") mpsfile = os.path.join(args.path, (args.format).format(segid)) segment.set('mps_file', mpsfile) print "writing" with open(args.jobfile, 'w') as f: f.write(lxml.tostring(root, pretty_print=True))
parser.add_argument("--run", action='store_const', const=1, default=0, help="Run jobs") parser.add_argument("--read", action='store_const', const=1, default=0, help="Readout outputfiles") args = parser.parse_args() BohrtoAngstroem = 0.5291772109 b2a3 = BohrtoAngstroem**3 root = XmlParser(args.options) h = float(root.find("fieldstrength").text) tags = (root.find("tags").text).split() if h < 10E-5: print "Aborting. Field strength is too small" sys.exit() def copyfromtemplate(path): base = os.path.realpath('.') #print base template = os.path.join(base, "TEMPLATE") #print template
from __tools__ import MyParser
from __tools__ import XmlParser
from __tools__ import XmlWriter
import lxml.etree as lxml

parser = MyParser(description="Delete Entries from Jobfile")
parser.add_argument("--jobfile", "-j", type=str, required=True, help="jobfile")
parser.add_argument("--output", "-o", type=str, default="joboutput.xml",
                    help="output jobfile")
parser.add_argument("--exclude", type=str, nargs="+",
                    help="Tags to exclude from jobfile e,h,n,s,t")
args = parser.parse_args()

print "Removing states with tags including: {}".format(" ".join(args.exclude))
root = XmlParser(args.jobfile)
# iterate over a copy, because removing elements while iterating can skip entries
for entry in list(root.iter('job')):
    status = entry.find("tag").text
    for eex in args.exclude:
        if eex in status:
            root.remove(entry)
            break
print "Writing to {}".format(args.output)
XmlWriter(root, args.output)
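# Usage sketch (the script name is a hypothetical placeholder): drop all
# electron and hole jobs, keeping only the remaining entries.
# python delete_jobs.py --jobfile eqm.jobs.xml --output joboutput.xml --exclude e h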
parser=MyParser(description="Environment to split a jobfile into many and submit to cluster") parser.add_argument("--options","-o",type=str,required=True,help="optionfile") parser.add_argument("--submit",action='store_const', const=1, default=0,help="Submit to cluster") parser.add_argument("--setup",action='store_const', const=1, default=0,help="Setup") parser.add_argument("--merge",action='store_const', const=1, default=0,help="Merge jobfiles") parser.add_argument("--info",action='store_const', const=1, default=0,help="Display info about each jobfile") parser.add_argument("--reset",type=str, nargs="+",default=False,help="Reset FAILED and or ASSIGNED to AVAILABLE") parser.add_argument("--exclude",type=int, nargs="+", default=False,help="Exclude certain jobs from action,give the numbers of the jobs") parser.add_argument("--include",type=int, nargs="+", default=False,help="Limit action to only the jobs, give the numbers of the jobs") args=parser.parse_args() if args.exclude!=False and args.include!=False: print "ERROR: Excluding and Including at the same time does not work. Choose different options!" sys.exit() root=XmlParser(args.options) queue=root.find("queue").text procs=int(root.find("procs").text) tag=root.find("tag").text jobfile=root.find("jobfile").text calculator=root.find("calculator").text optionfile=root.find("optfile").text sql=root.find("sqlfile").text threads=int(root.find("threads").text) cache=int(root.find("cache").text) rsync=(root.find("rsync").text) numberofjobs=int(root.find("clusterjobs").text) workdir=root.find("workdir").text
nargs="+", default=False, help="Exclude certain jobs from action,give the numbers of the jobs") parser.add_argument( "--include", type=int, nargs="+", default=False, help="Limit action to only the jobs, give the numbers of the jobs") args = parser.parse_args() if args.exclude != False and args.include != False: print "ERROR: Excluding and Including at the same time does not work. Choose different options!" sys.exit() root = XmlParser(args.options) queue = root.find("queue").text procs = int(root.find("procs").text) tag = root.find("tag").text jobfile = root.find("jobfile").text calculator = root.find("calculator").text optionfile = root.find("optfile").text sql = root.find("sqlfile").text threads = int(root.find("threads").text) cache = int(root.find("cache").text) rsync = (root.find("rsync").text) numberofjobs = int(root.find("clusterjobs").text) workdir = root.find("workdir").text modules = root.find("modules").text source = root.find("source").text
from __tools__ import RepresentsInt
from __exciton__ import readexcitonlogfile

parser = MyParser(description="Environment to do numerical polarisation calculations with gwbse and gaussian")
parser.add_argument("--template", "-t", type=str, required=True,
                    help="Folder from which to take the votca optionfiles")
parser.add_argument("--options", "-o", type=str, required=True, help="optionfile")
parser.add_argument("--setup", action='store_const', const=1, default=0,
                    help="Setup folders")
parser.add_argument("--run", action='store_const', const=1, default=0,
                    help="Run jobs")
parser.add_argument("--read", action='store_const', const=1, default=0,
                    help="Readout outputfiles")
args = parser.parse_args()

BohrtoAngstroem = 0.5291772109
b2a3 = BohrtoAngstroem**3

root = XmlParser(args.options)
h = float(root.find("fieldstrength").text)
tags = (root.find("tags").text).split()
if h < 10E-5:
    print "Aborting. Field strength is too small"
    sys.exit()


def copyfromtemplate(path):
    base = os.path.realpath('.')
    # print base
    template = os.path.join(base, "TEMPLATE")
    # print template