def send(self):
    # Pre-submission validation for a gridpack-based LHE production:
    # verifies the gridpack tarball exists on disk and that the process
    # is declared in the parameter module's gridpack list.
    # NOTE(review): only the validation part is visible here; the actual
    # submission logic that the locals below (Dir, nbjobsSub, lhedir)
    # would be used by is not in view -- this block may be truncated.
    Dir = os.getcwd()
    nbjobsSub = 0
    gplist = self.para.gridpacklist
    lhedir = self.para.lhe_dir
    gpdir = self.para.gp_dir
    # gridpack tarball expected at <gp_dir>/<process>.tar.gz
    gptotest = '%s/%s.tar.gz' % (gpdir, self.process)
    if ut.file_exist(gptotest) == False:
        print 'Gridpack=======', gptotest, '======= does not exist'
        sys.exit(3)
    # the process must also be declared in the gridpack list
    try:
        gplist[self.process]
    except KeyError, e:
        print 'process %s does not exist as gridpack, exit' % self.process
        sys.exit(3)
def makeyaml(outdir, uid):
    """Reserve a job id by writing ``events_<uid>.yaml`` inside *outdir*.

    The file records the job as status 'sending' with a timestamp and acts
    as a lock: a second call with the same *uid* finds the file and backs
    off.

    Parameters:
        outdir: directory receiving the yaml file; created if missing.
        uid:    unique job identifier embedded in the file name.

    Returns:
        False if the yaml file already exists (id already taken),
        True after the file has been written.
    """
    if not ut.dir_exist(outdir):
        # os.makedirs instead of os.system("mkdir %s"): no shell involved
        # (path cannot be misinterpreted by the shell) and missing parent
        # directories are created as well.
        os.makedirs(outdir)
    if outdir[-1] != '/':
        outdir += '/'
    outfile = '%sevents_%s.yaml' % (outdir, uid)
    if ut.file_exist(outfile):
        return False
    data = {
        'processing': {
            'status': 'sending',
            'timestamp': ut.gettimestamp(),
        }
    }
    with open(outfile, 'w') as outyaml:
        yaml.dump(data, outyaml, default_flow_style=False)
    return True
def run(self, yamlcheck):
    """Scan each process sub-directory of self.indir that has a merge.yaml.

    Reads the merge summary (event count, size, bad/done file counts) and
    determines which list of the parameter module the process belongs to:
    plain gridpack, decay of a gridpack (name with decay/HT suffix
    stripped), or pythia-only sample.

    NOTE(review): the original indentation of this block was lost; the
    nesting below (suffix-stripping fallback inside the first KeyError
    handler) is a reconstruction -- confirm against the upstream source.
    """
    #ldir=[x[0] for x in os.walk(self.indir)]
    ldir = next(os.walk(self.indir))[1]  # first-level sub-directories only
    for l in ldir:
        process = l
        mergefile = self.indir + '/' + l + '/merge.yaml'
        if not ut.file_exist(mergefile):
            continue
        print '-------------- process ', process
        tmpf = None
        with open(mergefile, 'r') as stream:
            try:
                # NOTE(review): yaml.load without an explicit Loader is
                # unsafe on untrusted input; these files are produced by
                # this tool itself.
                tmpf = yaml.load(stream)
            except yaml.YAMLError as exc:
                print(exc)
        # merge summary written by the checker/merger step
        events_tot = tmpf['merge']['nevents']
        size_tot = tmpf['merge']['size'] / 1000000000.  # bytes -> GB
        bad_tot = tmpf['merge']['nbad']
        files_tot = tmpf['merge']['ndone']
        sumw_tot = 0
        # reco samples are named mgp8_*, the gridpacks mg_*
        proc = process.replace('mgp8_', 'mg_')
        news = str(proc)  # keep unstripped name for the pythia-only lookup
        proc = str(proc)
        ispythiaonly = False
        try:
            teststring = self.para.gridpacklist[proc][0]
        except IOError as e:
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
        except ValueError:
            print "Could not convert data to an integer."
        except KeyError, e:
            # not a plain gridpack: strip the trailing decay token
            # (or the 4 trailing tokens of an HT-binned name) and retry
            # the stripped name against the decay list
            print 'I got a KeyError 1 - reason "%s"' % str(e)
            ssplit = proc.split('_')
            stest = ''
            ntest = 1
            if '_HT_' in proc:
                ntest = 4
            # NOTE(review): the loop index shadows 'proc' (original code)
            for proc in xrange(0, len(ssplit) - ntest):
                stest += ssplit[proc] + '_'
            stest = stest[0:len(stest) - 1]  # drop trailing '_'
            proc = stest
            try:
                teststringdecay = self.para.decaylist[stest][0]
            except IOError as e:
                print "I/O error({0}): {1}".format(e.errno, e.strerror)
            except ValueError:
                print "Could not convert data to an integer."
            except KeyError, e:
                print 'I got a KeyError 2 - reason "%s"' % str(e)
        # finally check whether this is a pythia-only sample
        try:
            teststringpythia = self.para.pythialist[news][0]
            ispythiaonly = True
        except IOError as e:
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
        except ValueError:
            print "Could not convert data to an integer."
        except KeyError, e:
            print 'I got a KeyError 3 - reason "%s"' % str(e)
class send_lhep8(): #__________________________________________________________ def __init__(self, njobs, events, process, islsf, iscondor, queue, para, version, decay): self.njobs = njobs self.events = -1 self.process = process self.islsf = islsf self.iscondor = iscondor self.queue = queue self.para = para self.version = version self.decay = decay self.user = os.environ['USER'] #__________________________________________________________ def send(self, force): Dir = os.getcwd() gplist = self.para.gridpacklist outdir = '%s%s/' % (self.para.delphes_dir, self.version) try: gplist[self.process] except KeyError, e: print 'process %s does not exist as gridpack' % self.process sys.exit(3) delphescards_mmr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mmr) if ut.file_exist( delphescards_mmr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mmr sys.exit(3) delphescards_mr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mr) if ut.file_exist( delphescards_mr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mr sys.exit(3) delphescards_base = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_base) if ut.file_exist(delphescards_base) == False: print 'delphes card does not exist: ', delphescards_base sys.exit(3) fccconfig = '%s%s' % (self.para.fccconfig_dir, self.para.fccconfig) if ut.file_exist(fccconfig) == False: print 'fcc config file does not exist: ', fccconfig sys.exit(3) print '======================================', self.process pythiacard = '%s%s.cmd' % ( self.para.pythiacards_dir, self.process.replace( 'mg_pp', 'p8_pp').replace('mg_gg', 'p8_gg')) if self.decay != '': pythiacard = '%s%s_%s.cmd' % ( self.para.pythiacards_dir, self.process.replace('mg_pp', 'p8_pp').replace( 'mg_gg', 'p8_gg'), self.decay) if ut.file_exist(pythiacard) == 
False and not force: print 'pythia card does not exist: ', pythiacard timeout = 60 print "do you want to use the default pythia card [y/n] (60sec to reply)" rlist, _, _ = select([sys.stdin], [], [], timeout) if rlist: s = sys.stdin.readline() if s == "y\n": print 'use default card' pythiacard = '%sp8_pp_default.cmd' % ( self.para.pythiacards_dir) else: print 'exit' sys.exit(3) else: print "timeout, use default card" pythiacard = '%sp8_pp_default.cmd' % ( self.para.pythiacards_dir) elif ut.file_exist(pythiacard) == False and force: print "force argument, use default card" pythiacard = '%sp8_pp_default.cmd' % (self.para.pythiacards_dir) pr_noht = '' if '_HT_' in self.process: ssplit = self.process.split('_') stest = '' for s in xrange(0, len(ssplit) - 3): stest += ssplit[s] + '_' pr_noht = stest[0:len(stest) - 1] #check that the specified decay exists if self.process in self.para.decaylist and self.decay != '' and '_HT_' not in self.process: if self.decay not in self.para.decaylist[self.process]: print 'decay ==%s== does not exist for process ==%s==' % ( self.decay, self.process) sys.exit(3) #check that the specified decay exists if pr_noht in self.para.decaylist and self.decay != '' and '_HT_' in self.process: if self.decay not in self.para.decaylist[pr_noht]: print 'decay ==%s== does not exist for process ==%s==' % ( self.decay, self.process) sys.exit(3) pr_decay = self.process if self.decay != '': pr_decay = self.process + '_' + self.decay print '====', pr_decay, '====' processp8 = pr_decay.replace('mg_pp', 'mgp8_pp').replace('mg_gg', 'mgp8_gg') acctype = 'FCC' if 'HELHC' in self.para.module_name: acctype = 'HELHC' logdir = Dir + "/BatchOutputs/%s/%s/%s/" % (acctype, self.version, processp8) if not ut.dir_exist(logdir): os.system("mkdir -p %s" % logdir) yamldir = '%s/%s/%s' % (self.para.yamldir, self.version, processp8) if not ut.dir_exist(yamldir): os.system("mkdir -p %s" % yamldir) yamllhedir = '%s/lhe/%s' % (self.para.yamldir, self.process) All_files = 
glob.glob("%s/events_*.yaml" % yamllhedir) if len(All_files) == 0: print 'there is no LHE files checked for process %s exit' % self.process sys.exit(3) if len(All_files) < self.njobs: print 'only %i LHE file exists, will not run all the jobs requested' % len( All_files) nbjobsSub = 0 ntmp = 0 if self.islsf == False and self.iscondor == False: print "Submit issue : LSF nor CONDOR flag defined !!!" sys.exit(3) condor_file_str = '' for i in xrange(len(All_files)): if nbjobsSub == self.njobs: break tmpf = None with open(All_files[i], 'r') as stream: try: tmpf = yaml.load(stream) if ut.getsize(All_files[i]) == 0: continue if tmpf['processing']['status'] != 'DONE': continue except yaml.YAMLError as exc: print(exc) jobid = tmpf['processing']['jobid'] myyaml = my.makeyaml(yamldir, jobid) if not myyaml: print 'job %s already exists' % jobid continue outfile = '%s/%s/events_%s.root' % (outdir, processp8, jobid) if ut.file_exist(outfile): print 'outfile already exist, continue ', outfile frunname = 'job%s.sh' % (jobid) frunfull = '%s/%s' % (logdir, frunname) frun = None try: frun = open(frunfull, 'w') except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) time.sleep(10) frun = open(frunfull, 'w') commands.getstatusoutput('chmod 777 %s' % (frunfull)) frun.write('#!/bin/bash\n') frun.write('unset LD_LIBRARY_PATH\n') frun.write('unset PYTHONHOME\n') frun.write('unset PYTHONPATH\n') frun.write('source %s\n' % (self.para.stack)) frun.write('mkdir job%s_%s\n' % (jobid, processp8)) frun.write('cd job%s_%s\n' % (jobid, processp8)) frun.write('export EOS_MGM_URL=\"root://eospublic.cern.ch\"\n') frun.write('mkdir -p %s%s/%s\n' % (self.para.delphes_dir, self.version, processp8)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (tmpf['processing']['out'])) frun.write('gunzip -c %s > events.lhe\n' % tmpf['processing']['out'].split('/')[-1]) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % 
(delphescards_base)) if 'fcc' in self.version: frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mmr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s config.py \n' % (fccconfig)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s card.cmd\n' % (pythiacard)) frun.write('echo "Beams:LHEF = events.lhe" >> card.cmd\n') frun.write('echo "Random:seed = %s" >> card.cmd\n' % jobid) if 'helhc' in self.version: frun.write('echo " Beams:eCM = 27000." >> card.cmd\n') frun.write( '%s/run fccrun.py config.py --delphescard=card.tcl --inputfile=card.cmd --outputfile=events_%s.root --nevents=%i\n' % (self.para.fccsw, jobid, self.events)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py events_%s.root %s\n' % (jobid, outfile)) frun.write('cd ..\n') frun.write('rm -rf job%s_%s\n' % (jobid, processp8)) frun.close() if self.islsf == True: cmdBatch = "bsub -M 3000000 -R \"pool=40000\" -q %s -o %s -cwd %s %s" % ( self.queue, logdir + '/job%s/' % (jobid), logdir + '/job%s/' % (jobid), frunfull) batchid = -1 job, batchid = ut.SubmitToLsf( cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs)) nbjobsSub += job elif self.iscondor == True: condor_file_str += frunfull + " " nbjobsSub += 1 if self.iscondor == True: # clean string condor_file_str = condor_file_str.replace("//", "/") # frunname_condor = 'job_desc_lhep8.cfg' frunfull_condor = '%s/%s' % (logdir, frunname_condor) frun_condor = None try: frun_condor = open(frunfull_condor, 'w') except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) time.sleep(10) frun_condor = open(frunfull_condor, 'w') commands.getstatusoutput('chmod 777 %s' % frunfull_condor) # frun_condor.write('executable = $(filename)\n') frun_condor.write( 'Log = %s/condor_job.%s.$(ClusterId).$(ProcId).log\n' % (logdir, 
str(jobid))) frun_condor.write( 'Output = %s/condor_job.%s.$(ClusterId).$(ProcId).out\n' % (logdir, str(jobid))) frun_condor.write( 'Error = %s/condor_job.%s.$(ClusterId).$(ProcId).error\n' % (logdir, str(jobid))) frun_condor.write('getenv = True\n') frun_condor.write('environment = "LS_SUBCWD=%s"\n' % logdir) # not sure frun_condor.write('request_memory = 4G\n') # frun_condor.write('requirements = ( (OpSysAndVer =?= "CentOS7") && (Machine =!= LastRemoteHost) )\n') frun_condor.write( 'requirements = ( (OpSysAndVer =?= "SLCern6") && (Machine =!= LastRemoteHost) )\n' ) frun_condor.write( 'on_exit_remove = (ExitBySignal == False) && (ExitCode == 0)\n' ) frun_condor.write('max_retries = 3\n') frun_condor.write('+JobFlavour = "%s"\n' % self.queue) frun_condor.write('+AccountingGroup = "group_u_FCC.local_gen"\n') frun_condor.write('queue filename matching files %s\n' % condor_file_str) frun_condor.close() # nbjobsSub = 0 cmdBatch = "condor_submit %s" % frunfull_condor print cmdBatch job = ut.SubmitToCondor(cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs)) nbjobsSub += job print 'succesfully sent %i job(s)' % nbjobsSub
class send_lhe(): #__________________________________________________________ def __init__(self, njobs, events, process, islsf, queue, para): self.njobs = njobs self.events = events self.process = process self.islsf = islsf self.queue = queue self.user = os.environ['USER'] self.para = para #__________________________________________________________ def send(self): Dir = os.getcwd() nbjobsSub = 0 gplist = self.para.gridpacklist lhedir = self.para.lhe_dir gpdir = self.para.gp_dir gptotest = '%s/%s.tar.gz' % (gpdir, self.process) if ut.file_exist(gptotest) == False: print 'Gridpack=======', gptotest, '======= does not exist' sys.exit(3) try: gplist[self.process] except KeyError, e: print 'process %s does not exist as gridpack, exit' % self.process sys.exit(3) acctype = 'FCC' if 'HELHC' in self.para.module_name: acctype = 'HELHC' logdir = Dir + "/BatchOutputs/%s/lhe/%s" % (acctype, self.process) if not ut.dir_exist(logdir): os.system("mkdir -p %s" % logdir) yamldir = '%s/lhe/%s' % (self.para.yamldir, self.process) if not ut.dir_exist(yamldir): os.system("mkdir -p %s" % yamldir) while nbjobsSub < self.njobs: #uid = int(ut.getuid(self.user)) uid = ut.getuid2(self.user) myyaml = my.makeyaml(yamldir, uid) if not myyaml: print 'job %s already exists' % uid continue if ut.file_exist('%s/%s/events_%s.lhe.gz' % (lhedir, self.process, uid)): print 'already exist, continue' continue frunname = 'job%s.sh' % (uid) frunfull = '%s/%s' % (logdir, frunname) frun = None try: frun = open(frunfull, 'w') except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) time.sleep(10) frun = open(frunfull, 'w') commands.getstatusoutput('chmod 777 %s' % frunfull) frun.write('unset LD_LIBRARY_PATH\n') frun.write('unset PYTHONHOME\n') frun.write('unset PYTHONPATH\n') frun.write('mkdir job%s_%s\n' % (uid, self.process)) frun.write('cd job%s_%s\n' % (uid, self.process)) frun.write('export EOS_MGM_URL=\"root://eospublic.cern.ch\"\n') frun.write('source %s\n' % (self.para.stack)) 
frun.write('mkdir %s\n' % (lhedir)) frun.write('mkdir %s%s\n' % (lhedir, self.process)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s/%s.tar.gz .\n' % (gpdir, self.process)) frun.write('tar -zxf %s.tar.gz\n' % self.process) frun.write('cd process/\n') frun.write('./run.sh %i %i\n' % (self.events, int(uid))) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py events.lhe.gz %s/%s/events_%s.lhe.gz\n' % (lhedir, self.process, uid)) frun.write('cd ..\n') frun.write('rm -rf job%s_%s\n' % (uid, self.process)) cmdBatch = "bsub -M 2000000 -R \"rusage[pool=2000]\" -q %s -o %s -cwd %s %s" % ( self.queue, logdir + '/job%s/' % (uid), logdir + '/job%s/' % (uid), logdir + '/' + frunname) #print cmdBatch batchid = -1 job, batchid = ut.SubmitToLsf(cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs)) nbjobsSub += job print 'succesfully sent %i jobs' % nbjobsSub
class send_p8(): #__________________________________________________________ def __init__(self, njobs, events, process, islsf, queue, para, version): self.njobs = njobs self.events = events self.process = process self.islsf = islsf self.queue = queue self.user = os.environ['USER'] self.para = para self.version = version #__________________________________________________________ def send(self): Dir = os.getcwd() nbjobsSub = 0 p8list = self.para.pythialist outdir = '%s%s/' % (self.para.delphes_dir, self.version) try: p8list[self.process] except KeyError, e: print 'process %s does not exist, exit' % self.process sys.exit(3) acctype = 'FCC' if 'HELHC' in self.para.module_name: acctype = 'HELHC' logdir = Dir + "/BatchOutputs/%s/%s/%s/" % (acctype, self.version, self.process) if not ut.dir_exist(logdir): os.system("mkdir -p %s" % logdir) yamldir = '%s/%s/%s' % (self.para.yamldir, self.version, self.process) if not ut.dir_exist(yamldir): os.system("mkdir -p %s" % yamldir) delphescards_mmr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mmr) if ut.file_exist( delphescards_mmr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mmr, ' , exit' sys.exit(3) delphescards_mr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mr) if ut.file_exist( delphescards_mr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mr, ' , exit' sys.exit(3) delphescards_base = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_base) if ut.file_exist(delphescards_base) == False: print 'delphes card does not exist: ', delphescards_base, ' , exit' sys.exit(3) fccconfig = '%s%s' % (self.para.fccconfig_dir, self.para.fccconfig) if ut.file_exist(fccconfig) == False: print 'fcc config file does not exist: ', fccconfig, ' , exit' sys.exit(3) print 
'======================================', self.process pythiacard = '%s%s.cmd' % (self.para.pythiacards_dir, self.process) if ut.file_exist(pythiacard) == False: print 'pythia card does not exist: ', pythiacard, ' , exit' sys.exit(3) while nbjobsSub < self.njobs: uid = ut.getuid2(self.user) myyaml = my.makeyaml(yamldir, uid) if not myyaml: print 'job %s already exists' % uid continue outfile = '%s/%s/events_%s.root' % (outdir, self.process, uid) if ut.file_exist(outfile): print 'file %s already exist, continue' % outfile continue frunname = 'job%s.sh' % (uid) frunfull = '%s/%s' % (logdir, frunname) frun = None try: frun = open(frunfull, 'w') except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) time.sleep(10) frun = open(frunfull, 'w') commands.getstatusoutput('chmod 777 %s' % (frunfull)) frun.write('#!/bin/bash\n') frun.write('unset LD_LIBRARY_PATH\n') frun.write('unset PYTHONHOME\n') frun.write('unset PYTHONPATH\n') frun.write('source %s\n' % (self.para.stack)) frun.write('mkdir job%s_%s\n' % (uid, self.process)) frun.write('cd job%s_%s\n' % (uid, self.process)) frun.write('export EOS_MGM_URL=\"root://eospublic.cern.ch\"\n') frun.write('mkdir -p %s/%s\n' % (outdir, self.process)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_base)) if 'fcc' in self.version: frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mmr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s config.py \n' % (fccconfig)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s card.cmd\n' % (pythiacard)) frun.write('echo "" >> card.cmd\n') frun.write('echo "Random:seed = %s" >> card.cmd\n' % uid) if 'helhc' in self.version: frun.write('echo " Beams:eCM = 27000." 
>> card.cmd\n') frun.write( '%s/run fccrun.py config.py --delphescard=card.tcl --inputfile=card.cmd --outputfile=events%s.root --nevents=%i\n' % (self.para.fccsw, uid, self.events)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py events%s.root %s\n' % (uid, outfile)) frun.write('cd ..\n') frun.write('rm -rf job%s_%s\n' % (uid, self.process)) cmdBatch = "bsub -M 2000000 -R \"pool=20000\" -q %s -o %s -cwd %s %s" % ( self.queue, logdir + '/job%s/' % (uid), logdir + '/job%s/' % (uid), frunfull) batchid = -1 job, batchid = ut.SubmitToLsf(cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs)) nbjobsSub += job print 'succesfully sent %i jobs' % nbjobsSub
def check(self, force, statfile):
    """Validate produced output files and write one yaml record per job.

    Walks each process directory under self.indir that has a 'check'
    marker (unless force), inspects every events_*<fext> file (.root via
    checkFile_root, .lhe.gz via checkFile_lhe), writes a per-job yaml
    with status DONE/BAD, and appends an HTML-formatted summary line to
    *statfile*.

    force=True re-checks files even if a valid yaml already exists.
    """
    #ldir=[x[0] for x in os.walk(self.indir)]
    ldir = next(os.walk(self.indir))[1]
    # bail out early if eos is not responsive
    if not ut.testeos(self.para.eostest, self.para.eostest_size):
        print 'eos seems to have problems, should check, will exit'
        sys.exit(3)
    for l in ldir:
        # optional filter on a single process
        if self.process != '' and self.process != l:
            continue
        #continue if process has been checked
        if l == 'BADPYTHIA' or l == 'lhe' or l == "__restored_files__" or l == "backup":
            continue
        print '%s/%s/check' % (self.yamldir, l)
        # only processes flagged with a 'check' marker file are revisited
        if not ut.file_exist('%s/%s/check' % (self.yamldir, l)) and not force:
            continue
        print '--------------------- ', l
        process = l
        All_files = glob.glob("%s/%s/events_*%s" % (self.indir, l, self.fext))
        print 'number of files ', len(All_files)
        if len(All_files) == 0:
            continue
        print 'process from the input directory ', process
        outdir = self.makeyamldir(self.yamldir + process)
        hasbeenchecked = False
        nevents_tot = 0
        njobsdone_tot = 0
        njobsbad_tot = 0
        for f in All_files:
            self.count = 0  # retry counter used by checkFile_lhe
            if not os.path.isfile(f):
                print 'file does not exists... %s' % f
                continue
            # job id is the last '_'-separated token of the file name
            jobid = f.split('_')[-1]
            jobid = jobid.replace(self.fext, '')
            userid = ut.find_owner(f)
            outfile = '%sevents_%s.yaml' % (outdir, jobid)
            # an empty yaml is a leftover from a crashed check: remove it
            if ut.getsize(outfile) == 0:
                cmd = "rm %s" % (outfile)
                print 'file size 0, remove and continue ', cmd
                os.system(cmd)
                continue
            # skip files whose yaml already says DONE (unless force)
            if ut.file_exist(
                    outfile) and ut.getsize(outfile) > 100 and not force:
                doc = None
                with open(outfile) as ftmp:
                    try:
                        doc = yaml.load(ftmp)
                    except yaml.YAMLError as exc:
                        print(exc)
                    except IOError as exc:
                        print "I/O error({0}): {1}".format(
                            exc.errno, exc.strerror)
                        print "outfile ", outfile
                try:
                    if doc != None:
                        value = doc['processing']['status']
                        if value == 'DONE':
                            continue
                except KeyError, e:
                    print 'status %s does not exist' % str(e)
            hasbeenchecked = True
            print '-----------', f
            if '.root' in self.fext:
                nevts, check = self.checkFile_root(f, self.para.treename)
                status = 'DONE'
                if not check:
                    status = 'BAD'
                if status == 'DONE':
                    nevents_tot += nevts
                    njobsdone_tot += 1
                else:
                    njobsbad_tot += 1
                dic = {
                    'processing': {
                        'process': process,
                        'jobid': jobid,
                        'nevents': nevts,
                        'status': status,
                        'out': f,
                        'size': os.path.getsize(f),
                        'user': userid
                    }
                }
                # write the per-job record; retry once on transient IOError
                try:
                    with open(outfile, 'w') as outyaml:
                        yaml.dump(dic, outyaml, default_flow_style=False)
                    continue
                except IOError as exc:
                    print "I/O error({0}): {1}".format(
                        exc.errno, exc.strerror)
                    print "outfile ", outfile
                    time.sleep(10)
                    with open(outfile, 'w') as outyaml:
                        yaml.dump(dic, outyaml, default_flow_style=False)
                    continue
            elif '.lhe.gz' in self.fext:
                nevts, check = self.checkFile_lhe(f)
                # retry the copy/unzip up to self.count == 10
                while nevts == -1 and not check:
                    nevts, check = self.checkFile_lhe(f)
                    if self.count == 10:
                        print 'can not copy or unzip the file, declare it wrong'
                        break
                status = 'DONE'
                if not check:
                    status = 'BAD'
                if status == 'DONE':
                    nevents_tot += nevts
                    njobsdone_tot += 1
                else:
                    njobsbad_tot += 1
                dic = {
                    'processing': {
                        'process': process,
                        'jobid': jobid,
                        'nevents': nevts,
                        'status': status,
                        'out': f,
                        'size': os.path.getsize(f),
                        'user': userid
                    }
                }
                with open(outfile, 'w') as outyaml:
                    yaml.dump(dic, outyaml, default_flow_style=False)
                continue
            else:
                print 'not correct file extension %s' % self.fext
        if hasbeenchecked:
            # append an HTML summary row (header written on first use)
            cmdp = '<pre>date=%s \t time=%s njobs=%i \t nevents=%i \t njobbad=%i \t process=%s </pre>\n' % (
                ut.getdate_str(), ut.gettime_str(), njobsdone_tot,
                nevents_tot, njobsbad_tot, process)
            stat_exist = ut.file_exist(statfile)
            with open(statfile, "a") as myfile:
                if not stat_exist:
                    myfile.write(
                        '<link href="/afs/cern.ch/user/h/helsens/www/style/txtstyle.css" rel="stylesheet" type="text/css" />\n'
                    )
                    myfile.write(
                        '<style type="text/css"> /*<![CDATA[*/ .espace{ margin-left:3em } .espace2{ margin-top:9em } /*]]>*/ </style>\n'
                    )
                myfile.write(cmdp)
            print 'date=%s time=%s njobs=%i nevents=%i njobbad=%i process=%s' % (
                ut.getdate_str(), ut.gettime_str(), njobsdone_tot,
                nevents_tot, njobsbad_tot, process)
def check(self, para):
    """Reconcile per-process file counts between eos and the afs yamls.

    For each process directory on eos, compares the number of events_*
    files with the nbad+ndone total recorded in the afs merge.yaml:
    fewer merged than on eos -> drop a 'check' marker so the checker
    revisits the process; more merged than on eos -> wipe the stale
    yamls; equal -> remove any leftover marker.  Writes the collected
    counts to <indirafs>/files.yaml.
    """
    #ldir=[x[0] for x in os.walk(self.indir)]
    ldir = next(os.walk(self.indireos))[1]
    # bail out early if eos is not responsive
    if not ut.testeos(para.eostest, para.eostest_size):
        print 'eos seems to have problems, should check, will exit'
        sys.exit(3)
    dic = {}
    for l in ldir:
        # optional filter on a single process
        if self.process != '' and self.process != l:
            continue
        #continue if process has been checked
        if l == 'BADPYTHIA' or l == 'lhe' or l == "__restored_files__" or l == "backup":
            continue
        print '--------------------- ', l
        proc = l
        # count the events_* files present on eos for this process
        nfileseos = 0
        if os.path.isdir('%s/%s' % (self.indireos, proc)):
            listeos = [
                x for x in os.listdir('%s/%s' % (self.indireos, proc))
                if 'events' in x
            ]
            nfileseos = len(listeos)
        if nfileseos == 0:
            continue
        nfilesmerged = 0
        mergefile = self.indirafs + '/' + l + '/merge.yaml'
        if not ut.file_exist(mergefile):
            # never merged: create the afs dir and flag it for checking
            if not ut.dir_exist('%s/%s' % (self.indirafs, proc)):
                os.system('mkdir %s/%s' % (self.indirafs, proc))
            self.touch('%s/%s/check' % (self.indirafs, proc))
            continue
        if not os.path.isdir(self.indirafs):
            os.system('mkdir %s' % self.indirafs)
        tmpf = None
        with open(mergefile, 'r') as stream:
            try:
                tmpf = yaml.load(stream)
            except yaml.YAMLError as exc:
                print(exc)
        # merged total = bad + done as recorded by the merger
        bad_tot = tmpf['merge']['nbad']
        files_tot = tmpf['merge']['ndone']
        ntot_files = bad_tot + files_tot
        print "tot files ", ntot_files, " files eos ", nfileseos
        dic[proc] = {'neos': nfileseos, 'nmerged': ntot_files}
        print '%s/%s/check' % (self.indirafs, proc)
        if ntot_files < nfileseos:
            # new files appeared on eos: flag the process for re-checking
            self.touch('%s/%s/check' % (self.indirafs, proc))
        elif ntot_files > nfileseos:
            # yamls refer to files no longer on eos: wipe and start over
            os.system('rm %s/%s/events*.yaml' % (self.indirafs, proc))
            os.system('rm %s/%s/merge.yaml' % (self.indirafs, proc))
        else:
            # in sync: clear any leftover marker
            if ut.file_exist('%s/%s/check' % (self.indirafs, proc)):
                os.system('rm %s/%s/check' % (self.indirafs, proc))
    outfile = self.indirafs + '/files.yaml'
    with open(outfile, 'w') as outyaml:
        yaml.dump(dic, outyaml, default_flow_style=False)
def makelist(self):
    """Build the heppy component list and process dictionary.

    Scans the merged reco yamls, looks each process up in the parameter
    module (gridpack, decayed gridpack with branching ratio, or
    pythia-only), writes per-process entries via addEntry/addEntryPythia
    into a heppy config file and a json process dictionary, and rewrites
    the parameter module with the computed matching efficiency.

    NOTE(review): the original indentation was lost; the placement of the
    param-file rewriting section inside the final 'else' branch below is
    reconstructed and should be confirmed against the upstream source.
    """
    yamldir_lhe = self.para.yamldir + 'lhe/'
    yamldir_reco = self.para.yamldir + self.version + '/'
    nmatched = 0
    nlhe = 0
    # write header for heppy file
    procDict = open('tmp.json', 'w')
    procDict.write('{\n')
    # write header for heppy file
    heppyFile = open(self.heppyList, 'w')
    heppyFile.write('import heppy.framework.config as cfg\n')
    heppyFile.write('\n')
    # parse param file
    with open(self.para.module_name) as f:
        infile = f.readlines()
    ldir = next(os.walk(yamldir_reco))[1]
    for l in ldir:
        processhad = None
        process = l
        yaml_reco = yamldir_reco + '/' + l + '/merge.yaml'
        if not ut.file_exist(yaml_reco):
            print 'no merged yaml for process %s continue' % l
            continue
        print ''
        print '------ ', process, '-------------'
        print ''
        # reco samples are named mgp8_*, the gridpacks mg_*
        if 'mgp8_' in process:
            processhad = process.replace('mgp8_', 'mg_')
        else:
            processhad = process
        # maybe this was a decayed process, so it cannot be found as such in in the param file
        br = 1.0
        decay = ''
        for dec in self.para.branching_ratios:
            # the decay is the last '_'-separated token of the name
            dec_proc = processhad.split('_')[-1]
            if dec in processhad and dec_proc == dec:
                br = self.para.branching_ratios[dec]
                decay = dec
        if decay != '':
            # decayed gridpack: look up the name without the decay suffix
            # and scale the cross-section by the branching ratio
            print 'decay---------- '
            decstr = '_{}'.format(decay)
            proc_param = processhad.replace(decstr, '')
            print '-------------- ', decstr, ' -- ', proc_param
            try:
                xsec = float(self.para.gridpacklist[proc_param][3]) * br
                kf = float(self.para.gridpacklist[proc_param][4])
                matchingEff = self.addEntry(process, yamldir_lhe, yaml_reco,
                                            xsec, kf, heppyFile, procDict,
                                            proc_param)
            except KeyError:
                print 'process {} does not exist in the list'.format(
                    process)
        elif process in self.para.pythialist:
            # pythia-only sample
            xsec = float(self.para.pythialist[process][3])
            kf = float(self.para.pythialist[process][4])
            matchingEff = self.addEntryPythia(process, xsec, kf, yaml_reco,
                                              heppyFile, procDict)
        elif processhad not in self.para.gridpacklist:
            print 'process :', processhad, 'not found in %s --> skipping process' % self.para.module_name
            continue
        else:
            # plain gridpack sample
            print 'self.para.gridpacklist[processhad][3] ', self.para.gridpacklist[
                processhad][3]
            print 'self.para.gridpacklist[processhad][4] ', self.para.gridpacklist[
                processhad][4]
            xsec = float(self.para.gridpacklist[processhad][3])
            kf = float(self.para.gridpacklist[processhad][4])
            matchingEff = self.addEntry(process, yamldir_lhe, yaml_reco,
                                        xsec, kf, heppyFile, procDict)
            # parse new param file
            with open(self.para.module_name) as f:
                lines = f.readlines()
            isgp = False
            for line in xrange(len(lines)):
                # only touch lines inside the gridpacklist section
                if 'gridpacklist' in str(lines[line]):
                    isgp = True
                if isgp == False:
                    continue
                if process == lines[line].rsplit(':', 1)[0].replace(
                        "'", ""):
                    # rewrite the entry with the freshly computed
                    # matching efficiency in the last slot
                    ll = ast.literal_eval(lines[line].rsplit(
                        ':', 1)[1][:-2])
                    print 'll ', ll
                    infile[
                        line] = "'{}':['{}','{}','{}','{}','{}','{}'],\n".format(
                            process, ll[0], ll[1], ll[2], ll[3], ll[4],
                            matchingEff)
            with open("tmp.py", "w") as f1:
                f1.writelines(infile)
    procDict.close()
    # parse param file
    # strip last comma
    with open('tmp.json', 'r') as myfile:
        data = myfile.read()
        newdata = data[:-2]
    # close header for heppy file
    procDict = open(self.procList, 'w')
    procDict.write(newdata)
    procDict.write('\n')
    procDict.write('}\n')
    # replace existing param.py file
    os.system("mv tmp.py %s" % self.para.module_name)
    os.system("rm tmp.json")
class send_lhep8(): #__________________________________________________________ def __init__(self, njobs, events, process, islsf, queue, para, version, decay): self.njobs = njobs self.events = -1 self.process = process self.islsf = islsf self.queue = queue self.para = para self.version = version self.decay = decay self.user = os.environ['USER'] #__________________________________________________________ def send(self): Dir = os.getcwd() gplist = self.para.gridpacklist outdir = '%s%s/' % (self.para.delphes_dir, self.version) try: gplist[self.process] except KeyError, e: print 'process %s does not exist as gridpack' % self.process sys.exit(3) delphescards_mmr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mmr) if ut.file_exist( delphescards_mmr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mmr sys.exit(3) delphescards_mr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mr) if ut.file_exist( delphescards_mr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mr sys.exit(3) delphescards_base = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_base) if ut.file_exist(delphescards_base) == False: print 'delphes card does not exist: ', delphescards_base sys.exit(3) fccconfig = '%s%s' % (self.para.fccconfig_dir, self.para.fccconfig) if ut.file_exist(fccconfig) == False: print 'fcc config file does not exist: ', fccconfig sys.exit(3) print '======================================', self.process pythiacard = '%s%s.cmd' % ( self.para.pythiacards_dir, self.process.replace( 'mg_pp', 'p8_pp').replace('mg_gg', 'p8_gg')) if self.decay != '': pythiacard = '%s%s_%s.cmd' % ( self.para.pythiacards_dir, self.process.replace('mg_pp', 'p8_pp').replace( 'mg_gg', 'p8_gg'), self.decay) if ut.file_exist(pythiacard) == False: print 'pythia card does not exist: ', 
pythiacard timeout = 60 print "do you want to use the default pythia card [y/n] (60sec to reply)" rlist, _, _ = select([sys.stdin], [], [], timeout) if rlist: s = sys.stdin.readline() if s == "y\n": print 'use default card' pythiacard = '%sp8_pp_default.cmd' % ( self.para.pythiacards_dir) else: print 'exit' sys.exit(3) else: print "timeout, use default card" pythiacard = '%sp8_pp_default.cmd' % ( self.para.pythiacards_dir) pr_noht = '' if '_HT_' in self.process: ssplit = self.process.split('_') stest = '' for s in xrange(0, len(ssplit) - 3): stest += ssplit[s] + '_' pr_noht = stest[0:len(stest) - 1] #check that the specified decay exists if self.process in self.para.decaylist and self.decay != '' and '_HT_' not in self.process: if self.decay not in self.para.decaylist[self.process]: print 'decay ==%s== does not exist for process ==%s==' % ( self.decay, self.process) sys.exit(3) #check that the specified decay exists if pr_noht in self.para.decaylist and self.decay != '' and '_HT_' in self.process: if self.decay not in self.para.decaylist[pr_noht]: print 'decay ==%s== does not exist for process ==%s==' % ( self.decay, self.process) sys.exit(3) pr_decay = self.process if self.decay != '': pr_decay = self.process + '_' + self.decay print '====', pr_decay, '====' processp8 = pr_decay.replace('mg_pp', 'mgp8_pp').replace('mg_gg', 'mgp8_gg') logdir = Dir + "/BatchOutputs/%s/%s/" % (self.version, processp8) if not ut.dir_exist(logdir): os.system("mkdir -p %s" % logdir) yamldir = '%s/%s/%s' % (self.para.yamldir, self.version, processp8) if not ut.dir_exist(yamldir): os.system("mkdir -p %s" % yamldir) yamllhedir = '%s/lhe/%s' % (self.para.yamldir, self.process) All_files = glob.glob("%s/events_*.yaml" % yamllhedir) if len(All_files) == 0: print 'there is no LHE files checked for process %s exit' % self.process sys.exit(3) if len(All_files) < self.njobs: print 'only %i LHE file exists, will not run all the jobs requested' % len( All_files) nbjobsSub = 0 ntmp = 0 for i in 
xrange(len(All_files)): if nbjobsSub == self.njobs: break tmpf = None with open(All_files[i], 'r') as stream: try: tmpf = yaml.load(stream) if tmpf['processing']['status'] != 'DONE': continue except yaml.YAMLError as exc: print(exc) jobid = tmpf['processing']['jobid'] myyaml = my.makeyaml(yamldir, jobid) if not myyaml: print 'job %s already exists' % jobid continue outfile = '%s/%s/events_%s.root' % (outdir, processp8, jobid) if ut.file_exist(outfile): print 'outfile already exist, continue ', outfile frunname = 'job%s.sh' % (jobid) frunfull = '%s/%s' % (logdir, frunname) frun = None try: frun = open(frunfull, 'w') except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) time.sleep(10) frun = open(frunfull, 'w') commands.getstatusoutput('chmod 777 %s' % (frunfull)) frun.write('#!/bin/bash\n') frun.write('unset LD_LIBRARY_PATH\n') frun.write('unset PYTHONHOME\n') frun.write('unset PYTHONPATH\n') frun.write('source %s\n' % (self.para.stack)) frun.write('mkdir job%s_%s\n' % (jobid, processp8)) frun.write('cd job%s_%s\n' % (jobid, processp8)) frun.write('export EOS_MGM_URL=\"root://eospublic.cern.ch\"\n') frun.write('mkdir -p %s%s/%s\n' % (self.para.delphes_dir, self.version, processp8)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (tmpf['processing']['out'])) frun.write('gunzip -c %s > events.lhe\n' % tmpf['processing']['out'].split('/')[-1]) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_base)) if 'fcc' in self.version: frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mmr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s config.py \n' % (fccconfig)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s card.cmd\n' % (pythiacard)) frun.write('echo 
"Beams:LHEF = events.lhe" >> card.cmd\n') if 'helhc' in self.version: frun.write('echo " Beams:eCM = 27000." >> card.cmd\n') frun.write( '%s/run fccrun.py config.py --delphescard=card.tcl --inputfile=card.cmd --outputfile=events_%s.root --nevents=%i\n' % (self.para.fccsw, jobid, self.events)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py events_%s.root %s\n' % (jobid, outfile)) frun.write('cd ..\n') frun.write('rm -rf job%s_%s\n' % (jobid, processp8)) cmdBatch = "bsub -M 2000000 -R \"rusage[pool=2000]\" -q %s -cwd%s %s" % ( self.queue, logdir, frunfull) batchid = -1 job, batchid = ut.SubmitToLsf(cmdBatch, 10) nbjobsSub += job print 'succesfully sent %i jobs' % nbjobsSub
def send(self):
    """Generate and submit MadGraph5 LHE production jobs to LSF.

    For up to self.njobs jobs: reserves a unique id via a yaml marker file,
    skips ids whose output .lhe.gz already exists, then bsub's
    bin/submitMG.sh with the MG5 card, cutfile and model paths.
    Side effects: creates log/yaml/jobs directories, submits batch jobs.
    """
    Dir = os.getcwd()
    nbjobsSub = 0
    # output dir
    outdir = self.para.lhe_dir
    logdir = Dir + "/BatchOutputs/lhe/%s" % (self.procname)
    if not ut.dir_exist(logdir):
        os.system("mkdir -p %s" % logdir)
    # per-process yaml bookkeeping directory (one yaml per job id)
    yamldir = '%s/lhe/%s' % (self.para.yamldir, self.procname)
    if not ut.dir_exist(yamldir):
        os.system("mkdir -p %s" % yamldir)
    # absolute paths so the batch job is independent of the submit cwd
    outdir = os.path.abspath(outdir)
    mg5card = os.path.abspath(self.mg5card)
    cuts = os.path.abspath(self.cutfile)
    model = os.path.abspath(self.model)
    jobsdir = './BatchOutputs/lhe/' + self.procname
    if not os.path.exists(jobsdir):
        os.makedirs(jobsdir)
        os.makedirs(jobsdir + '/std/')
        os.makedirs(jobsdir + '/cfg/')
    while nbjobsSub < self.njobs:
        #uid = int(ut.getuid(self.user))
        uid = ut.getuid2(self.user)
        # makeyaml returns False when a yaml for this uid already exists:
        # the uid is taken, draw a new one
        myyaml = my.makeyaml(yamldir, uid)
        if not myyaml:
            print 'job %s already exists' % uid
            continue
        if ut.file_exist('%s/%s/events_%s.lhe.gz' % (outdir, self.procname, uid)):
            print 'already exist, continue'
            continue
        print 'Submitting job ' + str(nbjobsSub) + ' out of ' + str(
            self.njobs)
        # the uid doubles as the generator random seed
        seed = str(uid)
        basename = self.procname + '_' + seed
        cwd = os.getcwd()
        script = cwd + '/bin/submitMG.sh '
        cmdBatch = 'bsub -o ' + jobsdir + '/std/' + basename + '.out -e ' + jobsdir + '/std/' + basename + '.err -q ' + self.queue
        cmdBatch += ' -R "rusage[mem={}:pool={}]"'.format(
            self.memory, self.disk)
        cmdBatch += ' -J ' + basename + ' "' + script + mg5card + ' ' + self.procname + ' ' + outdir + ' ' + seed + ' ' + str(
            self.nev) + ' ' + cuts + ' ' + model + '"'
        print cmdBatch
        batchid = -1
        # SubmitToLsf returns (njobs_submitted, batchid); retry budget of 10
        job, batchid = ut.SubmitToLsf(cmdBatch, 10)
        nbjobsSub += job
    print 'succesfully sent %i jobs' % nbjobsSub
def check(self, force, statfile):
    """Validate produced event files and write one status yaml per job.

    Walks the per-process subdirectories of self.indir, validates each
    events_*<fext> file (.root via checkFile_root, .lhe.gz via
    checkFile_lhe), and records a 'processing' yaml (status DONE/BAD,
    nevents, size, owner) next to it. Appends an HTML summary line to
    statfile. 'force' re-checks files that already have a yaml.
    """
    #ldir=[x[0] for x in os.walk(self.indir)]
    ldir = next(os.walk(self.indir))[1]
    # bail out early if EOS itself is unhealthy; all checks would be bogus
    if not ut.testeos(self.para.eostest, self.para.eostest_size):
        print 'eos seems to have problems, should check, will exit'
        sys.exit(3)
    for l in ldir:
        # optional filter on a single process name
        if self.process != '' and self.process != l:
            continue
        #continue if process has been checked
        if ut.yamlcheck(self.yamlcheck, l) and not force:
            continue
        print '--------------------- ', l
        process = l
        All_files = glob.glob("%s/%s/events_*%s" % (self.indir, l, self.fext))
        print 'number of files ', len(All_files)
        if len(All_files) == 0:
            continue
        # 'lhe' and EOS restore artefacts are not physics processes
        if l == 'lhe' or l == "__restored_files__":
            continue
        print 'process from the input directory ', process
        outdir = self.makeyamldir(self.yamldir + process)
        hasbeenchecked = False
        nevents_tot = 0
        njobsdone_tot = 0
        njobsbad_tot = 0
        for f in All_files:
            # retry counter, presumably incremented inside checkFile_lhe -- TODO confirm
            self.count = 0
            if not os.path.isfile(f):
                print 'file does not exists... %s' % f
                continue
            # job id is the events_<id><fext> suffix
            jobid = f.split('_')[-1]
            jobid = jobid.replace(self.fext, '')
            userid = ut.find_owner(f)
            outfile = '%sevents_%s.yaml' % (outdir, jobid)
            # skip files already checked (yaml > 100 bytes == real content)
            if ut.file_exist(
                    outfile) and ut.getsize(outfile) > 100 and not force:
                continue
            hasbeenchecked = True
            print '-----------', f
            if '.root' in self.fext:
                nevts, check = self.checkFile_root(f, self.para.treename)
                status = 'DONE'
                if not check:
                    status = 'BAD'
                if status == 'DONE':
                    nevents_tot += nevts
                    njobsdone_tot += 1
                else:
                    njobsbad_tot += 1
                dic = {
                    'processing': {
                        'process': process,
                        'jobid': jobid,
                        'nevents': nevts,
                        'status': status,
                        'out': f,
                        'size': os.path.getsize(f),
                        'user': userid
                    }
                }
                with open(outfile, 'w') as outyaml:
                    yaml.dump(dic, outyaml, default_flow_style=False)
                continue
            elif '.lhe.gz' in self.fext:
                nevts, check = self.checkFile_lhe(f)
                # nevts == -1 means copy/unzip failure: retry up to 10 times
                while nevts == -1 and not check:
                    nevts, check = self.checkFile_lhe(f)
                    if self.count == 10:
                        print 'can not copy or unzip the file, declare it wrong'
                        break
                status = 'DONE'
                if not check:
                    status = 'BAD'
                if status == 'DONE':
                    nevents_tot += nevts
                    njobsdone_tot += 1
                else:
                    njobsbad_tot += 1
                dic = {
                    'processing': {
                        'process': process,
                        'jobid': jobid,
                        'nevents': nevts,
                        'status': status,
                        'out': f,
                        'size': os.path.getsize(f),
                        'user': userid
                    }
                }
                with open(outfile, 'w') as outyaml:
                    yaml.dump(dic, outyaml, default_flow_style=False)
                continue
            else:
                print 'not correct file extension %s' % self.fext
        if hasbeenchecked:
            # mark process as needing a (re-)merge, then append stats as an
            # HTML table row to the web-facing stats page
            ut.yamlstatus(self.yamlcheck, process, False)
            cmdp = 'date=%s <span class="espace"/> time=%s <span class="espace"/> njobs=%i <span class="espace"/> nevents=%i <span class="espace"/> njobbad=%i <span class="espace"/> process=%s <br>\n' % (
                ut.getdate_str(), ut.gettime_str(), njobsdone_tot,
                nevents_tot, njobsbad_tot, process)
            stat_exist = ut.file_exist(statfile)
            with open(statfile, "a") as myfile:
                # first write: emit the CSS header once
                if not stat_exist:
                    myfile.write(
                        '<link href="/afs/cern.ch/user/h/helsens/www/style/txtstyle.css" rel="stylesheet" type="text/css" />\n'
                    )
                    myfile.write(
                        '<style type="text/css"> /*<![CDATA[*/ .espace{ margin-left:3em } .espace2{ margin-top:9em } /*]]>*/ </style>\n'
                    )
                myfile.write(cmdp)
            print 'date=%s time=%s njobs=%i nevents=%i njobbad=%i process=%s' % (
                ut.getdate_str(), ut.gettime_str(), njobsdone_tot,
                nevents_tot, njobsbad_tot, process)
def send(self):
    """Generate and submit MadGraph5 LHE jobs, via LSF or HTCondor.

    LSF path: one bsub per job inside the uid loop.
    Condor path: the loop only accumulates per-job argument strings; after
    the loop one parameter file + one submit description are written and a
    single condor_submit queues them all.
    Exits(3) when neither self.islsf nor self.iscondor is set.
    """
    Dir = os.getcwd()
    nbjobsSub = 0
    # output dir
    outdir = self.para.lhe_dir
    # accounting/module family drives the BatchOutputs subtree layout
    acctype = 'FCC'
    if 'HELHC' in self.para.module_name:
        acctype = 'HELHC'
    elif 'FCCee' in self.para.module_name:
        acctype = 'FCCee'
    logdir = Dir + "/BatchOutputs/%s/lhe/%s" % (acctype, self.procname)
    if not ut.dir_exist(logdir):
        os.system("mkdir -p %s" % logdir)
    yamldir = '%s/lhe/%s' % (self.para.yamldir, self.procname)
    if not ut.dir_exist(yamldir):
        os.system("mkdir -p %s" % yamldir)
    # absolute paths so the batch job is independent of the submit cwd
    outdir = os.path.abspath(outdir)
    mg5card = os.path.abspath(self.mg5card)
    cuts = os.path.abspath(self.cutfile)
    model = os.path.abspath(self.model)
    jobsdir = './BatchOutputs/%s/lhe/%s/' % (acctype, self.procname)
    if not os.path.exists(jobsdir):
        os.makedirs(jobsdir)
        os.makedirs(jobsdir + '/std/')
        os.makedirs(jobsdir + '/cfg/')
    if self.islsf == False and self.iscondor == False:
        print "Submit issue : LSF nor CONDOR flag defined !!!"
        sys.exit(3)
    # one "mg5card procname outdir seed nev cuts model" line per condor job
    condor_file_params_str = []
    while nbjobsSub < self.njobs:
        #uid = int(ut.getuid(self.user))
        uid = ut.getuid2(self.user)
        # makeyaml returns False when this uid is already reserved
        myyaml = my.makeyaml(yamldir, uid)
        if not myyaml:
            print 'job %s already exists' % uid
            continue
        if ut.file_exist('%s/%s/events_%s.lhe.gz' % (outdir, self.procname, uid)):
            print 'already exist, continue'
            continue
        print 'Submitting job ' + str(nbjobsSub) + ' out of ' + str(
            self.njobs)
        # the uid doubles as the generator random seed
        seed = str(uid)
        basename = self.procname + '_' + seed
        cwd = os.getcwd()
        script = cwd + '/bin/submitMG.sh '
        if self.islsf == True:
            cmdBatch = 'bsub -o ' + jobsdir + '/std/' + basename + '.out -e ' + jobsdir + '/std/' + basename + '.err -q ' + self.queue
            cmdBatch += ' -R "rusage[mem={}:pool={}]"'.format(
                self.memory, self.disk)
            cmdBatch += ' -J ' + basename + ' "' + script + mg5card + ' ' + self.procname + ' ' + outdir + ' ' + seed + ' ' + str(
                self.nev) + ' ' + cuts + ' ' + model + '"'
            print cmdBatch
            batchid = -1
            job, batchid = ut.SubmitToLsf(cmdBatch, 10, 1)
            nbjobsSub += job
        elif self.iscondor == True:
            condor_file_params_str.append(mg5card + ' ' + self.procname + ' ' +
                                          outdir + ' ' + seed + ' ' +
                                          str(self.nev) + ' ' + cuts + ' ' +
                                          model)
            nbjobsSub += 1
    if self.iscondor == True:
        # parameter file
        fparamname_condor = 'job_params_mglhe.txt'
        fparamfull_condor = '%s/%s' % (logdir, fparamname_condor)
        fparam_condor = None
        try:
            fparam_condor = open(fparamfull_condor, 'w')
        except IOError as e:
            # transient AFS/EOS hiccup: wait and retry the open once
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
            time.sleep(10)
            fparam_condor = open(fparamfull_condor, 'w')
        for line in condor_file_params_str:
            fparam_condor.write('%s\n' % line)
        fparam_condor.close()
        # condor config
        frunname_condor = 'job_desc_mglhe.cfg'
        frunfull_condor = '%s/%s' % (logdir, frunname_condor)
        frun_condor = None
        try:
            frun_condor = open(frunfull_condor, 'w')
        except IOError as e:
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
            time.sleep(10)
            frun_condor = open(frunfull_condor, 'w')
        commands.getstatusoutput('chmod 777 %s' % frunfull_condor)
        #
        # NOTE(review): 'uid' here is whatever the LAST loop iteration left
        # behind; it only names the condor log files -- confirm intended.
        frun_condor.write('executable = %s\n' % script)
        frun_condor.write(
            'Log = %s/condor_job.%s.$(ClusterId).$(ProcId).log\n' %
            (logdir, str(uid)))
        frun_condor.write(
            'Output = %s/condor_job.%s.$(ClusterId).$(ProcId).out\n' %
            (logdir, str(uid)))
        frun_condor.write(
            'Error = %s/condor_job.%s.$(ClusterId).$(ProcId).error\n' %
            (logdir, str(uid)))
        frun_condor.write('getenv = True\n')
        frun_condor.write('environment = "LS_SUBCWD=%s"\n' % logdir)  # not sure
        frun_condor.write('request_memory = %s\n' % self.memory)
        # frun_condor.write('requirements = ( (OpSysAndVer =?= "CentOS7") && (Machine =!= LastRemoteHost) )\n')
        frun_condor.write(
            'requirements = ( (OpSysAndVer =?= "SLCern6") && (Machine =!= LastRemoteHost) )\n'
        )
        frun_condor.write(
            'on_exit_remove = (ExitBySignal == False) && (ExitCode == 0)\n'
        )
        frun_condor.write('max_retries = 3\n')
        frun_condor.write('+JobFlavour = "%s"\n' % self.queue)
        frun_condor.write('+AccountingGroup = "group_u_FCC.local_gen"\n')
        # one queued job per line of the parameter file
        frun_condor.write('queue arguments from %s\n' % fparamfull_condor)
        frun_condor.close()
        #
        # counter restarts: from here on it counts condor submissions
        nbjobsSub = 0
        cmdBatch = "condor_submit %s" % frunfull_condor
        print cmdBatch
        job = ut.SubmitToCondor(cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs))
        nbjobsSub += job
    print 'succesfully sent %i job(s)' % nbjobsSub
def merge(self, force):
    """Aggregate per-job 'processing' yamls into one merge.yaml per process.

    For each process directory under self.indir, sums nevents/size over
    jobs with status DONE, collects BAD outputs separately, and writes the
    totals to <indir>/<process>/merge.yaml. Unless 'force' is set, only
    processes flagged by a 'check' marker file are merged.
    """
    ldir = next(os.walk(self.indir))[1]
    print self.indir, ' ==== ', self.process
    #ldir=[x[0] for x in os.walk(self.indir)]
    for l in ldir:
        # optional filter on a single process name
        if self.process != '' and self.process != l:
            continue
        outfile = self.indir + '/' + l + '/merge.yaml'
        totsize = 0
        totevents = 0
        process = None
        outfiles = []
        outfilesbad = []
        outdir = None
        ndone = 0
        nbad = 0
        All_files = glob.glob("%s/%s/events_*.yaml" % (self.indir, l))
        # leftover debug trace
        print 'ypuhfwegwegwgwe', len(All_files)
        print "%s/%s/events_*.yaml" % (self.indir, l)
        if len(All_files) == 0:
            # no job yamls at all: drop any stale merge.yaml
            if os.path.isfile("%s/%s/merge.yaml" % (self.indir, l)):
                os.system("rm %s/%s/merge.yaml" % (self.indir, l))
            continue
        #continue if process has been checked
        print '%s/%s/check' % (self.indir, l)
        if not ut.file_exist('%s/%s/check' % (self.indir, l)) and not force:
            continue
        print 'merging process %s %i files' % (l, len(All_files))
        for f in All_files:
            if not os.path.isfile(f):
                print 'file does not exists... %s' % f
                continue
            with open(f, 'r') as stream:
                try:
                    tmpf = yaml.load(stream)
                    if ut.getsize(f) == 0:
                        continue
                    # job still in flight: ignore
                    if tmpf['processing']['status'] == 'sending':
                        continue
                    if tmpf['processing']['status'] == 'BAD':
                        nbad += 1
                        outfilesbad.append(
                            tmpf['processing']['out'].split('/')[-1])
                        # outdir = full 'out' path minus its basename
                        outdir = tmpf['processing']['out'].replace(
                            tmpf['processing']['out'].split('/')[-1], '')
                        process = tmpf['processing']['process']
                        continue
                    # status DONE: accumulate into the merge totals
                    totsize += tmpf['processing']['size']
                    totevents += tmpf['processing']['nevents']
                    process = tmpf['processing']['process']
                    tmplist = [
                        tmpf['processing']['out'].split('/')[-1],
                        tmpf['processing']['nevents']
                    ]
                    outfiles.append(tmplist)
                    outdir = tmpf['processing']['out'].replace(
                        tmpf['processing']['out'].split('/')[-1], '')
                    ndone += 1
                except yaml.YAMLError as exc:
                    print(exc)
                except IOError as exc:
                    print "I/O error({0}): {1}".format(
                        exc.errno, exc.strerror)
                    print "outfile ", f
        dic = {
            'merge': {
                'process': process,
                'nevents': totevents,
                'outfiles': outfiles,
                'outdir': outdir,
                'size': totsize,
                'ndone': ndone,
                'nbad': nbad,
                'outfilesbad': outfilesbad,
            }
        }
        try:
            with open(outfile, 'w') as outyaml:
                yaml.dump(dic, outyaml, default_flow_style=False)
        except IOError as exc:
            # transient filesystem error: wait and retry the write once
            print "I/O error({0}): {1}".format(exc.errno, exc.strerror)
            print "outfile ", outfile
            time.sleep(10)
            with open(outfile, 'w') as outyaml:
                yaml.dump(dic, outyaml, default_flow_style=False)
def addEntry(self, process, yaml_lhe, yaml_reco, xsec, kf, heppyFile, procDict, proc_param=''):
    """Write one heppy MCComponent block and one procDict entry for a process.

    Matches reconstructed files (yaml_reco merge.yaml) against their source
    LHE files (yaml_lhe/<processhad>/merge.yaml) and computes the matching
    efficiency nmatched/nlhe. Writes the component definition to heppyFile
    and the summary line to procDict. Returns the matching efficiency
    (1.0 when no LHE or no reco events were found).
    """
    # hadronic-level process name: mgp8_* samples derive from mg_* LHE
    processhad = process
    if 'mgp8_' in process:
        processhad = process.replace('mgp8_', 'mg_')
    if proc_param != '':
        processhad = proc_param.replace('mgp8_', 'mg_')
    yaml_lhe = yaml_lhe + '/' + processhad + '/merge.yaml'
    print 'lhe yaml ', yaml_lhe
    print 'reco yaml ', yaml_reco
    if not ut.file_exist(yaml_lhe):
        print 'no merged file lhe for process %s continue' % process
        sys.exit(3)
        # NOTE(review): unreachable -- sys.exit(3) above terminates; one of
        # the two (exit vs. return 1.0) is presumably stale -- confirm intent
        return 1.0
    nmatched = 0
    nweights = 0
    nlhe = 0
    heppyFile.write('{} = cfg.MCComponent(\n'.format(process))
    heppyFile.write(" \'{}\',\n".format(process))
    heppyFile.write(' files=[\n')
    matchingEff = 1.0
    ylhe = None
    with open(yaml_lhe, 'r') as stream:
        try:
            ylhe = yaml.load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    yreco = None
    with open(yaml_reco, 'r') as stream:
        try:
            yreco = yaml.load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    # NOTE(review): raises TypeError if either yaml failed to parse (ylhe /
    # yreco left None) -- no guard here
    nmatched += int(yreco['merge']['nevents'])
    # keep only LHE files that have a corresponding reconstructed output
    for f in ylhe['merge']['outfiles']:
        if any(f[0].replace('.lhe.gz', '') in s[0]
               for s in yreco['merge']['outfiles']):
            nlhe += int(f[1])
            heppyFile.write(
                "    'root://eospublic.cern.ch/{}/{}',\n".format(
                    yreco['merge']['outdir'], f[0].replace('.lhe.gz', '.root')))
    heppyFile.write(']\n')
    heppyFile.write(')\n')
    heppyFile.write('\n')
    # skip process if do not find corresponding lhes
    if nlhe == 0:
        print 'did not find any LHE event for process', process
        return matchingEff
    if nmatched == 0:
        print 'did not find any FCCSW event for process', process
        return matchingEff
    # compute matching efficiency
    matchingEff = round(float(nmatched) / nlhe, 3)
    if nweights == 0:
        nweights = nmatched
    entry = '  "{}": {{"numberOfEvents": {}, "sumOfWeights": {}, "crossSection": {}, "kfactor": {}, "matchingEfficiency": {}}},\n'.format(
        process, nmatched, nweights, xsec, kf, matchingEff)
    print 'N: {}, Nw:{}, xsec: {} , kf: {} pb, eff: {}'.format(
        nmatched, nweights, xsec, kf, matchingEff)
    procDict.write(entry)
    return matchingEff
class send_lhe():
    """Submit gridpack-based LHE generation jobs to LSF or HTCondor.

    Each job unpacks the process gridpack, runs its run.sh with a unique
    seed, and copies the produced events.lhe.gz to the EOS lhe directory.
    """

    #__________________________________________________________
    def __init__(self, njobs, events, process, islsf, iscondor, queue, para):
        # njobs: number of jobs to submit; events: events per job
        # process: gridpack/process name; islsf/iscondor: backend selectors
        # queue: LSF queue or condor JobFlavour; para: parameter module
        self.njobs = njobs
        self.events = events
        self.process = process
        self.islsf = islsf
        self.iscondor = iscondor
        self.queue = queue
        self.user = os.environ['USER']
        self.para = para

    #__________________________________________________________
    def send(self):
        """Write one shell script per job and submit via LSF or HTCondor."""
        Dir = os.getcwd()
        nbjobsSub = 0
        gplist = self.para.gridpacklist
        lhedir = self.para.lhe_dir
        gpdir = self.para.gp_dir
        gptotest = '%s/%s.tar.gz' % (gpdir, self.process)
        if ut.file_exist(gptotest) == False:
            print 'Gridpack=======', gptotest, '======= does not exist'
            sys.exit(3)
        # sanity: the process must be a registered gridpack
        try:
            gplist[self.process]
        except KeyError, e:
            print 'process %s does not exist as gridpack, exit' % self.process
            sys.exit(3)
        acctype = 'FCC'
        if 'HELHC' in self.para.module_name:
            acctype = 'HELHC'
        logdir = Dir + "/BatchOutputs/%s/lhe/%s" % (acctype, self.process)
        if not ut.dir_exist(logdir):
            os.system("mkdir -p %s" % logdir)
        yamldir = '%s/lhe/%s' % (self.para.yamldir, self.process)
        if not ut.dir_exist(yamldir):
            os.system("mkdir -p %s" % yamldir)
        if self.islsf == False and self.iscondor == False:
            print "Submit issue : LSF nor CONDOR flag defined !!!"
            sys.exit(3)
        # space-separated list of job scripts for condor's "queue ... matching"
        condor_file_str = ''
        while nbjobsSub < self.njobs:
            #uid = int(ut.getuid(self.user))
            uid = ut.getuid2(self.user)
            # makeyaml returns False when this uid is already reserved
            myyaml = my.makeyaml(yamldir, uid)
            if not myyaml:
                print 'job %s already exists' % uid
                continue
            if ut.file_exist('%s/%s/events_%s.lhe.gz' % (lhedir, self.process, uid)):
                print 'already exist, continue'
                continue
            frunname = 'job%s.sh' % (uid)
            frunfull = '%s/%s' % (logdir, frunname)
            frun = None
            try:
                frun = open(frunfull, 'w')
            except IOError as e:
                # transient filesystem error: wait and retry the open once
                print "I/O error({0}): {1}".format(e.errno, e.strerror)
                time.sleep(10)
                frun = open(frunfull, 'w')
            commands.getstatusoutput('chmod 777 %s' % frunfull)
            # NOTE(review): unlike send_p8.send, no '#!/bin/bash' shebang is
            # written here -- confirm whether this is intentional
            frun.write('unset LD_LIBRARY_PATH\n')
            frun.write('unset PYTHONHOME\n')
            frun.write('unset PYTHONPATH\n')
            frun.write('mkdir job%s_%s\n' % (uid, self.process))
            frun.write('cd job%s_%s\n' % (uid, self.process))
            frun.write('export EOS_MGM_URL=\"root://eospublic.cern.ch\"\n')
            frun.write('source %s\n' % (self.para.stack))
            frun.write('mkdir %s\n' % (lhedir))
            frun.write('mkdir %s%s\n' % (lhedir, self.process))
            # fetch the gridpack, unpack, run with (nevents, seed=uid)
            frun.write(
                'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s/%s.tar.gz .\n'
                % (gpdir, self.process))
            frun.write('tar -zxf %s.tar.gz\n' % self.process)
            frun.write('cd process/\n')
            frun.write('./run.sh %i %i\n' % (self.events, int(uid)))
            frun.write(
                'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py events.lhe.gz %s/%s/events_%s.lhe.gz\n'
                % (lhedir, self.process, uid))
            frun.write('cd ..\n')
            frun.write('rm -rf job%s_%s\n' % (uid, self.process))
            frun.close()
            if self.islsf == True:
                cmdBatch = "bsub -M 2000000 -R \"rusage[pool=2000]\" -q %s -o %s -cwd %s %s" % (
                    self.queue, logdir + '/job%s/' % (uid),
                    logdir + '/job%s/' % (uid), logdir + '/' + frunname)
                #print cmdBatch
                batchid = -1
                job, batchid = ut.SubmitToLsf(
                    cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs))
                nbjobsSub += job
            elif self.iscondor == True:
                # condor: scripts are collected and submitted in one go below
                condor_file_str += frunfull + " "
                nbjobsSub += 1
        if self.iscondor == True:
            # clean string
            condor_file_str = condor_file_str.replace("//", "/")
            #
            frunname_condor = 'job_desc_lhe.cfg'
            frunfull_condor = '%s/%s' % (logdir, frunname_condor)
            frun_condor = None
            try:
                frun_condor = open(frunfull_condor, 'w')
            except IOError as e:
                print "I/O error({0}): {1}".format(e.errno, e.strerror)
                time.sleep(10)
                frun_condor = open(frunfull_condor, 'w')
            commands.getstatusoutput('chmod 777 %s' % frunfull_condor)
            #
            # NOTE(review): 'uid' below is the last loop iteration's value;
            # it only names the condor log files -- confirm intended
            frun_condor.write('executable = $(filename)\n')
            frun_condor.write(
                'Log = %s/condor_job.%s.$(ClusterId).$(ProcId).log\n' %
                (logdir, str(uid)))
            frun_condor.write(
                'Output = %s/condor_job.%s.$(ClusterId).$(ProcId).out\n' %
                (logdir, str(uid)))
            frun_condor.write(
                'Error = %s/condor_job.%s.$(ClusterId).$(ProcId).error\n' %
                (logdir, str(uid)))
            frun_condor.write('getenv = True\n')
            frun_condor.write('environment = "LS_SUBCWD=%s"\n' % logdir)  # not sure
            frun_condor.write('request_memory = 2G\n')
            # frun_condor.write('requirements = ( (OpSysAndVer =?= "CentOS7") && (Machine =!= LastRemoteHost) )\n')
            frun_condor.write(
                'requirements = ( (OpSysAndVer =?= "SLCern6") && (Machine =!= LastRemoteHost) )\n'
            )
            frun_condor.write(
                'on_exit_remove = (ExitBySignal == False) && (ExitCode == 0)\n'
            )
            frun_condor.write('max_retries = 3\n')
            frun_condor.write('+JobFlavour = "%s"\n' % self.queue)
            frun_condor.write('+AccountingGroup = "group_u_FCC.local_gen"\n')
            # one queued job per collected job script
            frun_condor.write('queue filename matching files %s\n' % condor_file_str)
            frun_condor.close()
            #
            # counter restarts: from here on it counts condor submissions
            nbjobsSub = 0
            cmdBatch = "condor_submit %s" % frunfull_condor
            print cmdBatch
            job = ut.SubmitToCondor(cmdBatch, 10,
                                    "%i/%i" % (nbjobsSub, self.njobs))
            nbjobsSub += job
        print 'succesfully sent %i job(s)' % nbjobsSub
class send_p8(): #__________________________________________________________ def __init__(self, njobs, events, process, islsf, iscondor, queue, para, version): self.njobs = njobs self.events = events self.process = process self.islsf = islsf self.iscondor = iscondor self.queue = queue self.user = os.environ['USER'] self.para = para self.version = version #__________________________________________________________ def send(self): Dir = os.getcwd() nbjobsSub = 0 p8list = self.para.pythialist outdir = '%s%s/' % (self.para.delphes_dir, self.version) try: p8list[self.process] except KeyError, e: print 'process %s does not exist, exit' % self.process sys.exit(3) acctype = 'FCC' if 'HELHC' in self.para.module_name: acctype = 'HELHC' logdir = Dir + "/BatchOutputs/%s/%s/%s/" % (acctype, self.version, self.process) if not ut.dir_exist(logdir): os.system("mkdir -p %s" % logdir) yamldir = '%s/%s/%s' % (self.para.yamldir, self.version, self.process) if not ut.dir_exist(yamldir): os.system("mkdir -p %s" % yamldir) delphescards_mmr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mmr) if ut.file_exist( delphescards_mmr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mmr, ' , exit' sys.exit(3) delphescards_mr = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_mr) if ut.file_exist( delphescards_mr ) == False and self.version != 'cms' and 'helhc' not in self.version: print 'delphes card does not exist: ', delphescards_mr, ' , exit' sys.exit(3) delphescards_base = '%s%s/%s' % (self.para.delphescards_dir, self.version, self.para.delphescard_base) if ut.file_exist(delphescards_base) == False: print 'delphes card does not exist: ', delphescards_base, ' , exit' sys.exit(3) fccconfig = '%s%s' % (self.para.fccconfig_dir, self.para.fccconfig) if ut.file_exist(fccconfig) == False: print 'fcc config file does not exist: ', fccconfig, ' , exit' sys.exit(3) print 
'======================================', self.process pythiacard = '%s%s.cmd' % (self.para.pythiacards_dir, self.process) if ut.file_exist(pythiacard) == False: print 'pythia card does not exist: ', pythiacard, ' , exit' sys.exit(3) if self.islsf == False and self.iscondor == False: print "Submit issue : LSF nor CONDOR flag defined !!!" sys.exit(3) condor_file_str = '' while nbjobsSub < self.njobs: uid = ut.getuid2(self.user) myyaml = my.makeyaml(yamldir, uid) if not myyaml: print 'job %s already exists' % uid continue outfile = '%s/%s/events_%s.root' % (outdir, self.process, uid) if ut.file_exist(outfile): print 'file %s already exist, continue' % outfile continue frunname = 'job%s.sh' % (uid) frunfull = '%s/%s' % (logdir, frunname) frun = None try: frun = open(frunfull, 'w') except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) time.sleep(10) frun = open(frunfull, 'w') commands.getstatusoutput('chmod 777 %s' % (frunfull)) frun.write('#!/bin/bash\n') frun.write('unset LD_LIBRARY_PATH\n') frun.write('unset PYTHONHOME\n') frun.write('unset PYTHONPATH\n') frun.write('source %s\n' % (self.para.stack)) frun.write('mkdir job%s_%s\n' % (uid, self.process)) frun.write('cd job%s_%s\n' % (uid, self.process)) frun.write('export EOS_MGM_URL=\"root://eospublic.cern.ch\"\n') frun.write('mkdir -p %s/%s\n' % (outdir, self.process)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_base)) if 'fcc' in self.version: frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mmr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s .\n' % (delphescards_mr)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s config.py \n' % (fccconfig)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py %s card.cmd\n' % (pythiacard)) frun.write('echo "" >> card.cmd\n') frun.write('echo "Random:seed = %s" >> 
card.cmd\n' % uid) if 'helhc' in self.version: frun.write('echo " Beams:eCM = 27000." >> card.cmd\n') frun.write( '%s/run fccrun.py config.py --delphescard=card.tcl --inputfile=card.cmd --outputfile=events%s.root --nevents=%i\n' % (self.para.fccsw, uid, self.events)) frun.write( 'python /afs/cern.ch/work/h/helsens/public/FCCutils/eoscopy.py events%s.root %s\n' % (uid, outfile)) frun.write('cd ..\n') frun.write('rm -rf job%s_%s\n' % (uid, self.process)) frun.close() if self.islsf == True: cmdBatch = "bsub -M 2000000 -R \"pool=20000\" -q %s -o %s -cwd %s %s" % ( self.queue, logdir + '/job%s/' % (uid), logdir + '/job%s/' % (uid), frunfull) job, batchid = ut.SubmitToLsf( cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs)) batchid = -1 job, batchid = ut.SubmitToLsf( cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs)) nbjobsSub += job elif self.iscondor == True: condor_file_str += frunfull + " " nbjobsSub += 1 if self.iscondor == True: # clean string condor_file_str = condor_file_str.replace("//", "/") # frunname_condor = 'job_desc_p8.cfg' frunfull_condor = '%s/%s' % (logdir, frunname_condor) frun_condor = None try: frun_condor = open(frunfull_condor, 'w') except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) time.sleep(10) frun_condor = open(frunfull_condor, 'w') commands.getstatusoutput('chmod 777 %s' % frunfull_condor) # frun_condor.write('executable = $(filename)\n') frun_condor.write( 'Log = %s/condor_job.%s.$(ClusterId).$(ProcId).log\n' % (logdir, str(uid))) frun_condor.write( 'Output = %s/condor_job.%s.$(ClusterId).$(ProcId).out\n' % (logdir, str(uid))) frun_condor.write( 'Error = %s/condor_job.%s.$(ClusterId).$(ProcId).error\n' % (logdir, str(uid))) frun_condor.write('getenv = True\n') frun_condor.write('environment = "LS_SUBCWD=%s"\n' % logdir) # not sure frun_condor.write('request_memory = 2G\n') # frun_condor.write('requirements = ( (OpSysAndVer =?= "CentOS7") && (Machine =!= LastRemoteHost) )\n') frun_condor.write( 'requirements = ( 
(OpSysAndVer =?= "SLCern6") && (Machine =!= LastRemoteHost) )\n' ) frun_condor.write( 'on_exit_remove = (ExitBySignal == False) && (ExitCode == 0)\n' ) frun_condor.write('max_retries = 3\n') frun_condor.write('+JobFlavour = "%s"\n' % self.queue) frun_condor.write('+AccountingGroup = "group_u_FCC.local_gen"\n') frun_condor.write('queue filename matching files %s\n' % condor_file_str) frun_condor.close() # nbjobsSub = 0 cmdBatch = "condor_submit %s" % frunfull_condor print cmdBatch job = ut.SubmitToCondor(cmdBatch, 10, "%i/%i" % (nbjobsSub, self.njobs)) nbjobsSub += job print 'succesfully sent %i job(s)' % nbjobsSub