def test_chain_multiparent(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_chain_multiparent', listens=False)
    output1 = '@SLURMY.output_dir/parent1'
    run_script1 = '#!/bin/bash\ntouch {}; sleep 2;'.format(output1)
    jh.add_job(run_script=run_script1, name='test_parent1', tags='parent1', output=output1)
    output2 = '@SLURMY.output_dir/parent2'
    run_script2 = '#!/bin/bash\ntouch {}; sleep 2;'.format(output2)
    jh.add_job(run_script=run_script2, name='test_parent2', tags='parent2', output=output2)
    run_script3 = '#!/bin/bash\nls {} {};'.format(output1, output2)
    jh.add_job(run_script=run_script3, name='test_child', parent_tags=['parent1', 'parent2'])
    jh.run_jobs()
    self.assertIs(jh.jobs.test_parent1.status, Status.SUCCESS)
    self.assertIs(jh.jobs.test_parent2.status, Status.SUCCESS)
    self.assertIs(jh.jobs.test_child.status, Status.SUCCESS)
def setUp(self):
    from slurmy import JobHandler, test_mode
    test_mode(True)
    self.test_dir = os.path.join(options.Main.workdir, 'slurmy_unittest/jobconfig')
    self.jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_jobconfig', do_snapshot=False)
    self.run_script = 'echo "test"'
    self.run_script_trigger = '@SLURMY.FINISHED; @SLURMY.SUCCESS;'
def test_type_local(self):
    from slurmy import JobHandler, Type
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_jobconfig_type_local', do_snapshot=False, local_max=1)
    job = jh.add_job(run_script=self.run_script, job_type=Type.LOCAL)
    self.assertIs(job.type, Type.LOCAL)
def test_output_listener(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_output_listener')
    jh.add_job(run_script=self.run_script_touch_file, name='test', output=self.output_file)
    jh.run_jobs()
    self.assertIs(jh.jobs.test.status, Status.SUCCESS)
    jh.reset()
    ## Point the output at a file that will never be created, so the output listener marks the job FAILED
    jh.jobs.test.config.backend.run_script = self.run_script_success
    jh.jobs.test.config.output = 'jwoigjwoijegoijwoijegoiwoeg'
    jh.run_jobs()
    self.assertIs(jh.jobs.test.status, Status.FAILED)
def load(name):
    """@SLURMY
    Load a slurmy session by name.

    * `name` Name of the slurmy session, as listed by list_sessions().

    Returns the jobhandler associated with the session (JobHandler).
    """
    from slurmy.tools import options
    from slurmy import JobHandler
    import sys
    ## Synchronise bookkeeping with entries on disk
    options.Main.sync_bookkeeping()
    bk = options.Main.get_bookkeeping()
    if bk is None:
        log.error('No bookkeeping found')
        return None
    python_version = sys.version_info.major
    if bk[name]['python_version'] != python_version:
        log.error('Python version "{}" of the snapshot is not compatible with the current version "{}"'.format(bk[name]['python_version'], python_version))
        return None
    work_dir = bk[name]['work_dir']
    jh = JobHandler(name=name, work_dir=work_dir, use_snapshot=True)
    return jh
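## Usage sketch (not part of the module): resume a previously created session by
## name. The session name 'my_session' is an assumption for illustration; the
## guard keeps the example from running on import.
if __name__ == '__main__':
    jh = load('my_session')
    if jh is not None:
        ## Re-run any jobs that did not finish successfully in the loaded session
        jh.run_jobs(retry=True)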
def test_reset(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_reset', listens=False)
    jh.add_job(run_script=self.run_script, name='test')
    jh.run_jobs()
    self.assertIs(jh.jobs.test.status, Status.SUCCESS)
    id_first = jh.jobs.test.id
    jh.reset()
    self.assertIs(jh.jobs.test.status, Status.CONFIGURED)
    jh.run_jobs()
    self.assertIs(jh.jobs.test.status, Status.SUCCESS)
    id_second = jh.jobs.test.id
    self.assertIsNot(id_first, id_second)
def test_chain(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_chain', listens=False)
    jh.add_job(run_script=self.run_script_touch_file, name='test_parent', tags='parent')
    jh.add_job(run_script=self.run_script_ls_file, name='test_child', parent_tags='parent')
    jh.run_jobs()
    self.assertIs(jh.jobs.test_parent.status, Status.SUCCESS)
    self.assertIs(jh.jobs.test_child.status, Status.SUCCESS)
def test_chain_fail(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_chain_fail', listens=False)
    jh.add_job(run_script=self.run_script_fail, name='test_parent', tags='parent')
    jh.add_job(run_script=self.run_script_success, name='test_child', parent_tags='parent')
    jh.run_jobs()
    self.assertIs(jh.jobs.test_parent.status, Status.FAILED)
    self.assertIs(jh.jobs.test_child.status, Status.CANCELLED)
def test_trigger_success_listener(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_trigger_success_listener')
    jh.add_job(run_script=self.run_script_trigger_success, name='test')
    jh.run_jobs()
    self.assertIs(jh.jobs.test.status, Status.SUCCESS)
def test_batch_listener(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_batch_listener')
    jh.add_job(run_script=self.run_script_fail, name='test')
    jh.run_jobs()
    status_fail = jh.jobs.test.status
    id_first = jh.jobs.test.id
    jh.jobs.test.config.backend.run_script = self.run_script_success
    jh.run_jobs(retry=True)
    status_success = jh.jobs.test.status
    id_second = jh.jobs.test.id
    self.assertIsNot(id_first, id_second)
    self.assertIs(status_fail, Status.FAILED)
    self.assertIs(status_success, Status.SUCCESS)
def test_mix_batch_local_listener(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_mix_batch_local_listener', local_max=1, local_dynamic=True)
    jh.add_job(run_script=self.run_script_fail, name='test_1')
    jh.add_job(run_script=self.run_script_fail, name='test_2')
    jh.run_jobs()
    self.assertIsNot(jh.jobs.test_1.type, jh.jobs.test_2.type)
    self.assertIs(jh.jobs.test_1.status, Status.FAILED)
    self.assertIs(jh.jobs.test_2.status, Status.FAILED)
    jh.jobs.test_1.config.backend.run_script = self.run_script_success
    jh.jobs.test_2.config.backend.run_script = self.run_script_success
    jh.run_jobs(retry=True)
    self.assertIs(jh.jobs.test_1.status, Status.SUCCESS)
    self.assertIs(jh.jobs.test_2.status, Status.SUCCESS)
def test_local_listener(self):
    from slurmy import JobHandler, Status, Type, test_mode
    test_mode(True)
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_local_listener', local_max=1)
    jh.add_job(run_script=self.run_script_fail, name='test', job_type=Type.LOCAL)
    jh.run_jobs()
    status_fail = jh.jobs.test.status
    jh.jobs.test.config.backend.run_script = self.run_script_success
    jh.run_jobs(retry=True)
    status_success = jh.jobs.test.status
    test_mode(False)
    self.assertIs(status_fail, Status.FAILED)
    self.assertIs(status_success, Status.SUCCESS)
def load_path(path):
    """@SLURMY
    Load a slurmy session by full path.

    * `path` Full folder path of the slurmy session, as listed by list_sessions().

    Returns the jobhandler associated with the session (JobHandler).
    """
    from slurmy import JobHandler
    jh_name = path
    jh_path = ''
    if '/' in jh_name:
        jh_path = jh_name.rsplit('/', 1)[0]
        jh_name = jh_name.rsplit('/', 1)[-1]
    jh = JobHandler(name=jh_name, work_dir=jh_path, use_snapshot=True)
    return jh
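## Usage sketch (not part of the module): load a session directly from its folder.
## The path below is a placeholder; the last path component is taken as the session
## name and the rest as its work_dir.
if __name__ == '__main__':
    jh = load_path('slurmy_unittest/jobconfig/test_jobconfig')
    jh.run_jobs(retry=True)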
def test_run_max(self):
    ## This effectively only tests whether run_jobs finishes
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_run_max', run_max=1)
    for i in range(3):
        jh.add_job(run_script=self.run_script, name='test_{}'.format(i))
    jh.run_jobs()
    for i in range(3):
        self.assertIs(jh['test_{}'.format(i)].status, Status.SUCCESS)
def test_post_process(self):
    from slurmy import JobHandler, Status
    jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_post_process')
    output_file = os.path.join(jh.config.output_dir, 'test')
    post_func = TestPostFunction(output_file)
    jh.add_job(run_script=self.run_script, name='test', post_func=post_func)
    jh.run_jobs()
    time.sleep(1)
    self.assertIs(jh.jobs.test.status, Status.SUCCESS)
    self.assertTrue(os.path.isfile(output_file))
def main():
    """Batch submission using slurmy."""
    ## Set up the JobHandler
    jh = JobHandler(
        local_max=8,
        local_dynamic=True,
        work_dir=mkdir('batch'),
        printer_bar_mode=True,
        wrapper=SingularityWrapper('docker://philippgadow/checkmate'))
    ## Add a job
    jh.add_job(run_script='/work/run_example_in_docker.sh', job_type=Type.LOCAL)
    ## Run all jobs
    jh.run_jobs()
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]


def list_treenames(file):
    tf = ROOT.TFile.Open(file)
    trees_list = []
    for key in tf.GetListOfKeys():
        trees_list.append(key.GetName())
    tf.Close()
    return trees_list


# set up the JobHandler
jh = JobHandler(work_dir="/project/etp3/eschanet/collect", name="hadd", run_max=50)

merge_script = """
echo "running on $(hostname)"
echo ""

source /etc/profile.d/modules.sh
module load root

echo "using $(which root)"

hadd -f {outputfile} {inputfiles}

#echo "removing files that have been merged to save diskspace"
#rm {inputfiles}
"""
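## Usage sketch (assumption, not from the original script): collect the input ROOT
## files, split them into chunks of 20 with chunks(), and submit one hadd job per
## chunk via merge_script. The input glob and output directory are placeholders.
import glob

inputfiles = sorted(glob.glob('/project/etp3/eschanet/ntuples/*.root'))
outdir = '/project/etp3/eschanet/collect/merged'
for i, chunk in enumerate(chunks(inputfiles, 20)):
    jh.add_job(
        run_script=merge_script.format(
            outputfile='{}/merged_{}.root'.format(outdir, i),
            inputfiles=' '.join(chunk)),
        name='hadd_{}'.format(i))
jh.run_jobs()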
#!/usr/bin/env python3

import os
import glob

from slurmy import JobHandler, Slurm, Singularity3Wrapper, SuccessTrigger

sw = Singularity3Wrapper('/cvmfs/atlas.cern.ch/repo/containers/images/singularity/x86_64-slc6.img')
jh = JobHandler(wrapper=sw, work_dir="/project/etp3/eschanet/collect", name="Reco_tf", run_max=50)

outdir = "/project/etp1/eschanet/derivations/private/TRUTH3"
indir = "/project/etp1/eschanet/EVNT/atlas"

run_script = """
echo Running on host `hostname`
echo Time is `date`
echo Directory is `pwd`

echo "Is this the real life? Is this just fantasy?
Caught in a landslide, no escape from reality
Open your eyes, look up to the skies and see
I'm just a poor boy, I need no sympathy
Because I'm easy come, easy go, little high, little low
Any way the wind blows doesn't really matter to me, to me"

pushd $TMPDIR
class Test(unittest.TestCase):

    def setUp(self):
        from slurmy import JobHandler, test_mode
        test_mode(True)
        self.test_dir = 'slurmy_unittest/jobconfig'
        self.jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_jobconfig', do_snapshot=False)
        self.run_script = 'echo "test"'
        self.run_script_trigger = '@SLURMY.FINISHED; @SLURMY.SUCCESS;'

    def tearDown(self):
        from slurmy import test_mode
        test_mode(False)

    ##TODO: run_script test --> with direct string and path to file

    def test_run_args(self):
        job = self.jh.add_job(run_script=self.run_script, run_args='test')
        self.assertIs(job.config.backend.run_args, 'test')

    def test_name(self):
        job = self.jh.add_job(run_script=self.run_script, name='test')
        self.assertIs(job.name, 'test')
        self.assertIn('test', self.jh.jobs)
        self.assertIs(self.jh.jobs.test.name, 'test')

    def test_type_local(self):
        from slurmy import JobHandler, Type
        jh = JobHandler(work_dir=self.test_dir, verbosity=0, name='test_jobconfig_type_local', do_snapshot=False, local_max=1)
        job = jh.add_job(run_script=self.run_script, job_type=Type.LOCAL)
        self.assertIs(job.type, Type.LOCAL)

    def test_finished_func(self):
        from slurmy import Status, Mode
        job = self.jh.add_job(run_script=self.run_script, finished_func=lambda x: x)
        self.assertIs(job.get_mode(Status.RUNNING), Mode.ACTIVE)
        self.assertTrue(job.config.finished_func(True))

    def test_success_func(self):
        from slurmy import Status, Mode
        job = self.jh.add_job(run_script=self.run_script, success_func=lambda x: x)
        self.assertIs(job.get_mode(Status.FINISHED), Mode.ACTIVE)
        self.assertTrue(job.config.success_func(True))

    def test_post_func(self):
        from slurmy import Status
        job = self.jh.add_job(run_script=self.run_script, post_func=lambda x: x)
        self.assertTrue(job.config.post_func(True))

    def test_output(self):
        from slurmy import Status, Mode
        job = self.jh.add_job(run_script=self.run_script, output='test')
        self.assertIs(job.get_mode(Status.FINISHED), Mode.PASSIVE)
        self.assertIsNotNone(job.output)

    def test_tags(self):
        job = self.jh.add_job(run_script=self.run_script, tags='hans')
        self.assertIn('hans', job.tags)
        job = self.jh.add_job(run_script=self.run_script, tags=['hans', 'horst'])
        self.assertIn('hans', job.tags)
        self.assertIn('horst', job.tags)

    def test_parent_tags(self):
        job = self.jh.add_job(run_script=self.run_script, parent_tags='hans')
        self.assertIn('hans', job.parent_tags)
        job = self.jh.add_job(run_script=self.run_script, parent_tags=['hans', 'horst'])
        self.assertIn('hans', job.parent_tags)
        self.assertIn('horst', job.parent_tags)

    def test_variable_substitution(self):
        from slurmy import Status
        job = self.jh.add_job(run_script=self.run_script, output='@SLURMY.output_dir/test')
        output = os.path.join(self.jh.config.output_dir, 'test')
        self.assertTrue(job.output == output)
#!/usr/bin/env python

import ROOT
import os, sys, glob
import argparse
import re
import pprint

from machete import pythonHelpers as ph
from slurmy import JobHandler, Slurm

jh = JobHandler(work_dir="/project/etp/eschanet/collect", name="campaignHadder", run_max=100)

merge_script = """
echo "running on $(hostname)"
echo ""

source /etc/profile.d/modules.sh
module load root

echo "using $(which root)"

hadd -f {outputfile} {inputfiles}

echo "removing files that have been merged to save diskspace"
#rm {inputfiles}
"""

parser = argparse.ArgumentParser(description='Hadd files across MC campaigns')
                    help='Include LHE weights', action='store_true')
parser.add_argument('--excludeSystematics', help='Exclude systematics', action='store_true')
parser.add_argument('--nominal', help='Only nominal trees', action='store_true')
parser.add_argument('--vjets', help='', action='store_true')

args = parser.parse_args()
print(args)

# set up the JobHandler
jh = JobHandler(work_dir="/project/etp3/eschanet/collect", name="skimCode", run_max=100)

# define the run script content
skim_script = """
#source ~/Code/hades/init.sh
#YTHONPATH=$PYTHONPATH:~/Code/hades/
#PATH=$PATH:~/ma/packages/hades/executables
#echo $PATH
pwd
cd ~/ma/packages/basic/
pwd
ls -1

source /etc/profile.d/modules.sh
def run():
    parser = argparse.ArgumentParser(description='Run stuff locally on etp')
    parser.add_argument('files', type=argparse.FileType('r'), nargs='+')
    parser.add_argument("-s", help="input sample", default=None)
    parser.add_argument("-selector", help="selector", default="OneLep")
    parser.add_argument("-writeTrees", help="sys or nominal", default="1")
    parser.add_argument("-deepConfig", help="input sample", default="SusySkim1LInclusive_Rel21.config")
    parser.add_argument("-outputPath", help="output path", default=None)
    parser.add_argument("-process", help="process tag to find your output", default=None)
    args = parser.parse_args()

    jobscript = """
echo Running on host `hostname`
echo Time is `date`
echo Directory is `pwd`

shopt -s expand_aliases
export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase
source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh

pushd $testarea
set -- ""
acmSetup
popd

echo "run_xAODNtMaker"
echo "    -s $sample"
echo "    -selector $selector"
echo "    -writeTrees $writeTrees"
echo "    -deepConfig $deepConfig"
echo "    -MaxEvents $maxEvents"
echo "    -SkipEvents $skipEvents"

run_xAODNtMaker -s $sample -selector $selector -writeTrees $writeTrees -deepConfig $deepConfig -MaxEvents $maxEvents -SkipEvents $skipEvents

[[ "$?" = "0" ]] && mv ${outputPath}/submitDir/data-tree/ ${outputPath}/${groupset}/${mytag}/merged/${process}_${minEvent}_${maxEvent}_merged_processed_${writeTrees}.root
"""

    sw = SingularityWrapper('/cvmfs/atlas.cern.ch/repo/containers/images/singularity/x86_64-centos7.img')
    jh = JobHandler(wrapper=sw, work_dir="/project/etp2/eschanet/collect", name="run_ntuple")  # , run_max=50

    # sf = SuccessOutputFile()

    ## 'params' is built earlier in the original script (not shown in this excerpt)
    for exportopts, optDict in params:
        slurm = Slurm(export=exportopts)
        outputfile = os.path.abspath("{path}/{groupset}/{mytag}/merged/{process}_{minEvent}_{maxEvent}_merged_processed_{sys}.root.done".format(**optDict))
        jobname = "run_xAODNtMaker_{groupset}_{process}_{sys}_{minEvent}".format(**optDict)
        jobname = clean_jobname(jobname)
        print(jobname)
        ft = FinishedTrigger(outputfile)
        jh.add_job(backend=slurm, run_script=jobscript, output=outputfile, success_func=ft, name=jobname)

    jh.run_jobs()