Example No. 1
def runMT_pool(args=('', '', '', '', '')):
    # unpack first so that `sh` is defined before it is printed;
    # the default tuple now matches the five unpacked fields
    block, param, sh, analysisDir, MT = args
    print "START %s" % (sh)
    producePlots(param.selections[sh], block, param.colors, param.variables,
                 param.uncertainties, sh, param.intLumi, param.delphesVersion,
                 param.runFull, analysisDir, MT)
    print "END %s" % (sh)
Example No. 2
def main():

    ops, args = options()

    args = split_comma_args(args)

    # give this analysis a name
    analysisName = ops.analysis_name

    # heppy analysis configuration
    heppyCfg = ops.heppy_cfg

    # process dictionary
    processDict = ops.proc_file_json

    # heppy trees location directory
    treeDir = ops.heppy_tree_dir

    # analysis dir
    analysisDir = ops.analysis_output

    # param file
    paramFile = ops.param_file
    sys.path.append(os.path.dirname(os.path.expanduser(paramFile)))
    param = importlib.import_module(
        os.path.splitext(ntpath.basename(paramFile))[0])

    # tree location
    treePath = 'heppy.{}.TreeProducer.TreeProducer_1/tree.root'.format(
        analysisName)

    # retrieve list of processes from heppy cfg
    processes = []
    with open(heppyCfg) as f:
        lines = f.readlines()
        for l in lines:
            if 'splitFactor' in l:
                processes.append(l.rsplit('.', 1)[0])

    with open(processDict) as f:
        procDict = json.load(f)

    # prepare analysis dir
    os.system('mkdir -p {}'.format(analysisDir))

    ### produce process dictionaries
    for sh in param.selections.keys():

        block = collections.OrderedDict()

        formBlock(processes, procDict, param.signal_groups,
                  param.background_groups, sh, treeDir, treePath, block)

        ### run analysis
        producePlots(param.selections[sh], block, param.colors,
                     param.variables, param.uncertainties, sh, param.intLumi,
                     param.delphesVersion, param.runFull, analysisDir)
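
One caveat with the import pattern above: appending the parameter file's directory to sys.path means the module is found by base name, which can collide with an unrelated module of the same name elsewhere on the path. On Python 3.5+ the file can be loaded directly by path instead; load_param_module below is a hypothetical helper, not part of these examples:

import os
import importlib.util

def load_param_module(param_file):
    # load the parameter module straight from its path, leaving sys.path untouched
    base_name = os.path.splitext(os.path.basename(param_file))[0]
    spec = importlib.util.spec_from_file_location(base_name,
                                                  os.path.abspath(param_file))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# usage: param = load_param_module(paramFile)
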
Example No. 3
def runMT_pool(args=('', '', '', '', '', '')):
    print "START %s" % (sh)
    block, param, sh, analysisDir, MT, ops = args
    producePlots(param.selections[sh],
                 block,
                 param.colors,
                 param.variables,
                 param.variables2D,
                 param.uncertainties,
                 sh,
                 param.intLumi,
                 param.delphesVersion,
                 param.runFull,
                 analysisDir,
                 MT,
                 latex_table=ops.latex_table,
                 no_plots=ops.no_plots,
                 nevents=ops.nevents)
    print "END %s" % (sh)
Example No. 4
def main():

    ops, args = options()

    args = split_comma_args(args)

    # give this analysis a name
    analysisName = ops.analysis_name

    # heppy analysis configuration
    heppyCfg = ops.heppy_cfg

    # process dictionary
    processDict = ops.proc_file_json

    # heppy trees location directory
    treeDir = ops.heppy_tree_dir

    # analysis dir
    analysisDir = ops.analysis_output

    # param file
    paramFile = ops.param_file

    module_path = os.path.abspath(paramFile)
    module_dir = os.path.dirname(module_path)
    base_name = os.path.splitext(ntpath.basename(paramFile))[0]

    sys.path.insert(0, module_dir)
    param = importlib.import_module(base_name)

    # tree location
    treePath = '/heppy.FCChhAnalyses.{}.TreeProducer.TreeProducer_1/tree.root'.format(
        analysisName)

    #multi-threading
    MT = ops.MT

    #lsf
    lsf = ops.lsf
    chunks = ops.chunks
    queue = ops.queue

    sh = ops.sh

    # retrieve list of processes from heppy cfg
    processes = []
    with open(heppyCfg) as f:
        lines = f.readlines()
        for l in lines:
            if 'splitFactor' in l:
                processes.append(l.rsplit('.', 1)[0])

    #processes = [c.name for c in heppyCfg.selectedComponents]

    with open(processDict) as f:
        procDict = json.load(f)

    # prepare analysis dir
    os.system('mkdir -p {}'.format(analysisDir))

    block = collections.OrderedDict()
    formBlock(processes, procDict, param.signal_groups,
              param.background_groups, sh, treeDir, treePath, block,
              ops.nevents)

    print sh

    ### run analysis
    producePlots(param.selections[sh],
                 block,
                 param.colors,
                 param.variables,
                 param.variables2D,
                 param.uncertainties,
                 sh,
                 param.intLumi,
                 param.delphesVersion,
                 param.runFull,
                 analysisDir,
                 MT,
                 latex_table=ops.latex_table,
                 no_plots=ops.no_plots,
                 nevents=ops.nevents)
Example No. 5
def main():

    ops, args = options()

    args = split_comma_args(args)

    # give this analysis a name
    analysisName = ops.analysis_name

    # heppy analysis configuration
    heppyCfg = ops.heppy_cfg

    # process dictionary
    processDict = ops.proc_file_json

    # heppy trees location directory
    treeDir = ops.heppy_tree_dir

    # analysis dir
    analysisDir = ops.analysis_output

    # param file
    paramFile = ops.param_file

    module_path = os.path.abspath(paramFile)
    module_dir = os.path.dirname(module_path)
    base_name = os.path.splitext(ntpath.basename(paramFile))[0]

    sys.path.insert(0, module_dir)
    param = importlib.import_module(base_name)

    # tree location
    treePath = '/heppy.FCChhAnalyses.{}.TreeProducer.TreeProducer_1/tree.root'.format(
        analysisName)

    #multi-threading
    MT = ops.MT

    #lsf
    lsf = ops.lsf
    queue = ops.queue

    # selection
    sel = ops.sel

    # check if output dir exists already
    '''if ops.force and not ops.clean:
        print 'removing {}'.format(analysisDir)
        processCmd('rm -rf {}'.format(analysisDir))
        os.makedirs(analysisDir)

    elif os.path.exists(analysisDir) and not ops.clean and not ops.sel:
        print ''
        sys.exit('Output dir: "'+analysisDir+'" exists. To overwrite existing dir run with --force option')
    '''

    # retrieve list of processes from heppy cfg
    processes = []
    with open(heppyCfg) as f:
        lines = f.readlines()
        for l in lines:
            if 'splitFactor' in l:
                processes.append(l.rsplit('.', 1)[0])

    #processes = [c.name for c in heppyCfg.selectedComponents]

    with open(processDict) as f:
        procDict = json.load(f)

    # run on a single signal hypothesis; also used in lsf batch submission
    if sel:
        block = collections.OrderedDict()
        formBlock(processes, procDict, param.signal_groups,
                  param.background_groups, sel, treeDir, treePath, block,
                  ops.nevents)
        producePlots(param, block, sel, ops)

    elif not lsf:
        if MT:
            runMT(processes, procDict, param, treeDir, treePath, analysisDir,
                  MT, ops)
        else:
            for sh in param.selections.keys():

                block = collections.OrderedDict()

                formBlock(processes, procDict, param.signal_groups,
                          param.background_groups, sh, treeDir, treePath,
                          block, ops.nevents)

                ### run analysis
                producePlots(param, block, sh, ops)

    else:
        runLSF(processes, procDict, param, treeDir, treePath, analysisDir, ops)
Example No. 6
def runLSF(processes, procDict, param, treeDir, treePath, analysisDir, ops):

    # clean if asked for it
    if ops.clean:
        print 'Cleaning LSF for {} jobs...'.format(ops.analysis_output)
        processCmd('rm -rf BatchOutput/{} LSF*'.format(ops.analysis_output))
        sys.exit('cleaned up everything.')

    # first thing is to check whether previous submission (if any) succeeded
    selection_list_4sub = []
    nbad = 0
    for sh in param.selections.keys():
        selname = formatted(sh)
        outseldir = 'sel_' + selname
        name_batch_dir = 'BatchOutput/{}/{}'.format(ops.analysis_output,
                                                    outseldir)
        rootfile = name_batch_dir + '/root_' + selname + '/histos.root'

        if not os.path.isfile(rootfile) or not isValidROOTfile(
                rootfile) or not getsize(rootfile):
            selection_list_4sub.append(sh)
            nbad += 1

    # keep submitting until nbad = 0
    if nbad > 0:
        print ' '
        print ' =========  Submitting {} jobs on {} queue ========='.format(
            nbad, ops.queue)

        jobCount = 0
        for sh in selection_list_4sub:

            block = collections.OrderedDict()
            formBlock(processes, procDict, param.signal_groups,
                      param.background_groups, sh, treeDir, treePath, block,
                      ops.nevents)
            selname = formatted(sh)

            dummyscript = """
    unset LD_LIBRARY_PATH
    unset PYTHONHOME
    unset PYTHONPATH
    mkdir job
    cd job

    cp -r DUMMYHOMEDIR/init.sh .
    cp -r DUMMYHOMEDIR/bin .
    cp -r DUMMYHOMEDIR/templates .

    source ./init.sh

    python bin/analyze.py -n DUMMYANALYSISNAME -c DUMMYHEPPYCFG -t DUMMYTREELOCATION -p DUMMYTEMPLATEFILE -j DUMMYJSONFILE -o DUMMYOUTSELDIR --sel 'DUMMYSEL' -m --nev DUMMYNEVTS --no_plots

    mkdir -p DUMMYHOMEDIR/BatchOutput/DUMMYOUTDIR/DUMMYOUTSELDIR
    cp -r DUMMYOUTSELDIR DUMMYHOMEDIR/BatchOutput/DUMMYOUTDIR
            """

            outseldir = 'sel_' + selname

            # replace relevant parts in script and dump into file
            dummyscript = dummyscript.replace('DUMMYHOMEDIR', os.getcwd())
            dummyscript = dummyscript.replace('DUMMYANALYSISNAME',
                                              ops.analysis_name)
            dummyscript = dummyscript.replace('DUMMYHEPPYCFG',
                                              os.path.abspath(ops.heppy_cfg))
            dummyscript = dummyscript.replace(
                'DUMMYTREELOCATION', os.path.abspath(ops.heppy_tree_dir))
            dummyscript = dummyscript.replace('DUMMYTEMPLATEFILE',
                                              os.path.abspath(ops.param_file))
            dummyscript = dummyscript.replace(
                'DUMMYJSONFILE', os.path.abspath(ops.proc_file_json))
            dummyscript = dummyscript.replace('DUMMYOUTSELDIR', outseldir)
            dummyscript = dummyscript.replace('DUMMYSEL', sh)
            dummyscript = dummyscript.replace('DUMMYNEVTS', str(ops.nevents))
            dummyscript = dummyscript.replace('DUMMYOUTDIR',
                                              ops.analysis_output)
            script = dummyscript

            name_batch_dir = 'BatchOutput/{}/{}'.format(
                ops.analysis_output, outseldir)
            if not os.path.exists(name_batch_dir):
                os.makedirs(name_batch_dir)

            scriptdir = name_batch_dir + '/cfg/'
            if not os.path.exists(scriptdir):
                os.makedirs(scriptdir)

            with open('script.sh', "w") as f:
                f.write(script)
            processCmd('chmod u+x script.sh')
            processCmd('mv script.sh {}'.format(scriptdir))

            script = scriptdir + 'script.sh'
            print 'Submitting job ' + str(jobCount + 1) + ' out of ' + str(
                len(selection_list_4sub))

            cmd = 'bsub -o ' + name_batch_dir + '/std/STDOUT -e ' + name_batch_dir + '/std/STDERR -q ' + ops.queue
            cmd += ' -J ' + outseldir + ' "' + os.path.abspath(script) + '" '

            # submitting jobs
            output = processCmd(cmd)
            while ('error' in output):
                time.sleep(1.0)
                output = processCmd(cmd)
                if ('error' not in output):
                    print 'Submitted after retry - job ' + str(jobCount + 1)

            jobCount += 1

    # no bad jobs found, so we can collect the output
    else:
        print '================================================================'
        print 'Submission was successful, now collecting output ...'
        print ''

        # one root file per signal hypothesis: simply copy it into a properly named dir
        # and run producePlots with runFull = False
        for sh in param.selections.keys():
            selname = formatted(sh)
            outseldir = 'sel_' + selname
            name_batch_dir = 'BatchOutput/{}/{}'.format(
                ops.analysis_output, outseldir)
            root_dir = '{}/root_{}'.format(name_batch_dir, selname)
            cmd = 'cp -r {} {}'.format(root_dir, ops.analysis_output)

            local_root_dir = '{}/root_{}'.format(ops.analysis_output, selname)

            print local_root_dir

            # collecting files
            if not os.path.exists(local_root_dir):
                processCmd(cmd)

            # run analysis on histos
            block = collections.OrderedDict()
            formBlock(processes, procDict, param.signal_groups,
                      param.background_groups, sh, treeDir, treePath, block,
                      ops.nevents)

            param.runFull = False
            producePlots(param, block, sh, ops)
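
The long chain of .replace() calls in runLSF substitutes one placeholder at a time. An equivalent sketch that gathers the placeholder/value pairs into a single table (same substitutions, same order) keeps the template and its parameters easier to audit:

substitutions = [
    ('DUMMYHOMEDIR', os.getcwd()),
    ('DUMMYANALYSISNAME', ops.analysis_name),
    ('DUMMYHEPPYCFG', os.path.abspath(ops.heppy_cfg)),
    ('DUMMYTREELOCATION', os.path.abspath(ops.heppy_tree_dir)),
    ('DUMMYTEMPLATEFILE', os.path.abspath(ops.param_file)),
    ('DUMMYJSONFILE', os.path.abspath(ops.proc_file_json)),
    ('DUMMYOUTSELDIR', outseldir),
    ('DUMMYSEL', sh),
    ('DUMMYNEVTS', str(ops.nevents)),
    ('DUMMYOUTDIR', ops.analysis_output),
]
# apply each substitution in turn to the job-script template
script = dummyscript
for placeholder, value in substitutions:
    script = script.replace(placeholder, value)
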
Example No. 7
def runMT_join(block, param, sh, analysisDir, MT, ops):
    print "START %s" % (sh)
    producePlots(param, block, sh, ops)
    print "END %s" % (sh)
Example No. 8
def runCondor(processes, procDict, param, treeDir, treePath, analysisDir, ops):

    # clean if asked for it
    if ops.clean:
        print('Cleaning LSF for {} jobs...'.format(ops.analysis_output))
        processCmd('rm -rf BatchOutput/{} LSF*'.format(ops.analysis_output))
        sys.exit('cleaned up everything.')

    # first thing is to check whether previous submission (if any) succeeded
    selection_list_4sub = []
    nbad = 0
    for sh in param.selections.keys():
        selname = formatted(sh)
        outseldir = 'sel_' + selname
        name_batch_dir = 'BatchOutput/{}/{}'.format(ops.analysis_output,
                                                    outseldir)
        rootfile = name_batch_dir + '/root_' + selname + '/histos.root'

        if not os.path.isfile(rootfile) or not isValidROOTfile(
                rootfile) or not getsize(rootfile):
            selection_list_4sub.append(sh)
            nbad += 1

    # keep submitting until nbad = 0
    if nbad > 0 and not ops.collect:

        print(' ')
        print(' =========  Submitting {} jobs on {} queue ========='.format(
            nbad, ops.queue))

        condor_filename = 'configs/condor_{}.sub'.format(ops.analysis_name)
        cmdfile = """# here goes your shell script
executable    = condorScript.sh

# here you specify where to put .log, .out and .err files
output                = configs/std/condor.$(ClusterId).$(ProcId).out
error                 = configs/std/condor.$(ClusterId).$(ProcId).err
log                   = configs/std/condor.$(ClusterId).log

+AccountingGroup = "group_u_CMST3.all"
+JobFlavour = "{}"\n
""".format(ops.queue)

        jobCount = 0
        for sh in selection_list_4sub:

            block = collections.OrderedDict()
            formBlock(processes, procDict, param.signal_groups,
                      param.background_groups, sh, treeDir, treePath, block,
                      ops.nevents)
            selname = formatted(sh)
            outseldir = 'sel_' + selname
            cmd = 'arguments="DUMMYHOMEDIR DUMMYANALYSISNAME DUMMYHEPPYCFG DUMMYTREELOCATION DUMMYTEMPLATEFILE DUMMYJSONFILE DUMMYOUTSELDIR DUMMYOUTDIR DUMMYSEL DUMMYNEVTS"\n'

            print(outseldir)
            # replace relevant parts in script and dump into file
            cmd = cmd.replace('DUMMYHOMEDIR', os.getcwd())
            cmd = cmd.replace('DUMMYANALYSISNAME', ops.analysis_name)
            cmd = cmd.replace('DUMMYHEPPYCFG', os.path.abspath(ops.heppy_cfg))
            cmd = cmd.replace('DUMMYTREELOCATION',
                              os.path.abspath(ops.heppy_tree_dir))
            cmd = cmd.replace('DUMMYTEMPLATEFILE',
                              os.path.abspath(ops.param_file))
            cmd = cmd.replace('DUMMYJSONFILE',
                              os.path.abspath(ops.proc_file_json))
            cmd = cmd.replace('DUMMYOUTSELDIR', outseldir)
            cmd = cmd.replace('DUMMYSEL', "'{}'".format(sh))
            cmd = cmd.replace('DUMMYNEVTS', str(ops.nevents))
            cmd = cmd.replace('DUMMYOUTDIR', ops.analysis_output)

            #print cmd
            cmdfile += cmd
            cmdfile += 'queue\n'

        with open(condor_filename, 'w') as dest_file:
            dest_file.write(cmdfile)

        print('-- launching --')
        submit_cmd = "condor_submit {}".format(condor_filename)
        print(condor_filename)

        if (ops.DRYRUN):
            print('Dry-run:')
            #os.system('cat {}'.format(condor_filename))
        else:
            os.system(submit_cmd)

    # no bad jobs found, so we can collect the output
    else:
        print(
            '================================================================')
        print('Submission was successful, now collecting output ...')
        print('')

        # one root file per signal hypothesis: simply copy it into a properly named dir
        # and run producePlots with runFull = False
        for sh in param.selections.keys():
            selname = formatted(sh)
            outseldir = 'sel_' + selname
            name_batch_dir = 'BatchOutput/{}/{}'.format(
                ops.analysis_output, outseldir)
            root_dir = '{}/root_{}'.format(name_batch_dir, selname)
            cmd = 'cp -r {} {}'.format(root_dir, ops.analysis_output)

            local_root_dir = '{}/root_{}'.format(ops.analysis_output, selname)

            print(local_root_dir)

            # collecting files
            if not os.path.exists(local_root_dir):
                processCmd(cmd)

            # run analysis on histos
            block = collections.OrderedDict()
            formBlock(processes, procDict, param.signal_groups,
                      param.background_groups, sh, treeDir, treePath, block,
                      ops.nevents)
            producePlots(param, block, sh, ops)
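
For reference, after the placeholder substitutions the generated configs/condor_<analysis>.sub contains the fixed header followed by one arguments/queue pair per outstanding selection. With hypothetical paths and a hypothetical "workday" flavour (all values below are illustrative, not from the examples), a two-selection file would look roughly like this:

# here goes your shell script
executable    = condorScript.sh

# here you specify where to put .log, .out and .err files
output                = configs/std/condor.$(ClusterId).$(ProcId).out
error                 = configs/std/condor.$(ClusterId).$(ProcId).err
log                   = configs/std/condor.$(ClusterId).log

+AccountingGroup = "group_u_CMST3.all"
+JobFlavour = "workday"

arguments="/home/user/ana MyAnalysis /path/to/heppy_cfg.py /path/to/trees /path/to/param.py /path/to/procs.json sel_selA out_ana 'selA' -1"
queue
arguments="/home/user/ana MyAnalysis /path/to/heppy_cfg.py /path/to/trees /path/to/param.py /path/to/procs.json sel_selB out_ana 'selB' -1"
queue
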