Code example #1
## Submitter options used below:
##   * rescale MC
##   * maximum runtime of 1h
##   * maximum memory per CPU of 2GB
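## NB: slurmSubmitter must already be importable from your analysis tools;
## the import statement is not part of this snippet.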

samples = [
        {
            "ID": 2129,
            # Either specify ID *OR* DB name
            #"db_name": "ST_t-channel_4f_leptonDecays_13TeV-amcatnlo_MiniAODv2_v1.0.0+7415_TTAnalysis_12d3865",
            "files_per_job": 10,
            # Optional: specify a path to a "json skeleton" that will be filled and used for this sample (see below)
            #"json_skeleton": "myJson.json",
        }
    ]

mySub = slurmSubmitter(samples, "<path to plotter / skimmer exe>", "test_slurm/", rescale=True, runtime="60", memory=2000)

## Create test_slurm directory and subdirs
mySub.setupDirs()

## Write command and data files in the slurm directory
mySub.createFiles()

## Actually submit the jobs
## It is recommended to do a dry-run first without submitting to slurm
#mySub.submit()


## Example JSON skeleton: the field "#DB_NAME#" will be filled in by slurmSubmitter.
## You can specify anything else you want; it will be passed to createHistoWithMultiDraw
## for this particular sample.
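## A minimal sketch of such a skeleton, written here as the equivalent Python
## dict. The "#DB_NAME#" key and the "sample_cut" / "sample-weight" /
## "sample-weight-args" fields are the ones used in these examples; any other
## option understood by your plotter/skimmer can be added alongside them.
example_json_skeleton = {
    "#DB_NAME#": {                         # replaced by the sample's DB name
        "sample_cut": "event_ht < 100",    # optional per-sample cut
    },
}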
Code example #2
def create_slurm(samples, output, executable):
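    ## Note: this function is an excerpt from a larger launch script and relies
    ## on module-level objects (e.g. the `copy` module and the parsed `args`)
    ## that are not shown here.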
    ## Create Slurm submitter to handle job creation
    #mySub = slurmSubmitter(samples, "%s/build/" % output + executable, "DUMMY", output + "/", rescale=True)
    mySub = slurmSubmitter(samples,
                           "%s/build/" % output + executable,
                           output + "/",
                           rescale=True)

    ## Create the output directory and subdirectories
    mySub.setupDirs()

    splitTT = False
    splitDY = False

    def get_node(db_name):
        split_name = db_name.split("_")
        node = None
        for i, it in enumerate(split_name):
            if it == "node":
                node = split_name[i + 1]
                break
        if node is None:
            raise Exception(
                "Could not extract node from DB name {}".format(db_name))
        return node

    def get_node_id(node):
        if node == "SM": return "-1"
        elif node == "box": return "0"
        else: return node
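    ## For a hypothetical db_name such as "GluGluToHH_node_SM_13TeV-madgraph",
    ## get_node() returns "SM"; get_node_id("SM") gives "-1", get_node_id("box")
    ## gives "0", and any other node label is returned unchanged.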

    ## Modify the input samples to add sample cuts and other per-sample options
    for sample in mySub.sampleCfg[:]:
        # TTbar final state splitting
        if splitTT and 'TT_TuneCUETP8M2T4_13TeV-powheg-pythia8' in sample["db_name"]:

            # Fully leptonic
            #tt_fl_sample = copy.deepcopy(sample)
            #newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            #tt_fl_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_FL_Tune")
            #newJson["sample_cut"] = "(hh_gen_ttbar_decay_type >= 4 && hh_gen_ttbar_decay_type <= 10 && hh_gen_ttbar_decay_type != 7)"

            #tt_fl_sample["json_skeleton"][tt_fl_sample["db_name"]] = newJson
            #tt_fl_sample["json_skeleton"].pop(sample["db_name"])
            #mySub.sampleCfg.append(tt_fl_sample)

            ## Semi leptonic
            #tt_sl_sample = copy.deepcopy(sample)
            #newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            #tt_sl_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_SL_Tune")
            #newJson["sample_cut"] = "(hh_gen_ttbar_decay_type == 2 || hh_gen_ttbar_decay_type == 3 || hh_gen_ttbar_decay_type == 7)"

            #tt_sl_sample["json_skeleton"][tt_sl_sample["db_name"]] = newJson
            #tt_sl_sample["json_skeleton"].pop(sample["db_name"])
            #mySub.sampleCfg.append(tt_sl_sample)

            ## Fully hadronic
            #tt_fh_sample = copy.deepcopy(sample)
            #newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            #tt_fh_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_FH_Tune")
            #newJson["sample_cut"] = "(hh_gen_ttbar_decay_type == 1)"

            #tt_fh_sample["json_skeleton"][tt_fh_sample["db_name"]] = newJson
            #tt_fh_sample["json_skeleton"].pop(sample["db_name"])
            #mySub.sampleCfg.append(tt_fh_sample)

            # Not fully leptonic
            tt_other_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            tt_other_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_Other_Tune")
            newJson["sample_cut"] = "(hh_gen_ttbar_decay_type <= 3 || hh_gen_ttbar_decay_type == 7)"

            tt_other_sample["json_skeleton"][tt_other_sample["db_name"]] = newJson
            tt_other_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(tt_other_sample)

            mySub.sampleCfg.remove(sample)

        if splitDY and 'DYJetsToLL_' in sample["db_name"]:

            # Z + jj' ; j = b/c, j' = b/c
            dy_bb_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            dy_bb_sample["db_name"] = sample["db_name"].replace("DYJetsToLL", "DYBBOrCCToLL")
            newJson["sample_cut"] = "(hh_llmetjj_HWWleptons_nobtag_cmva.gen_bb || hh_llmetjj_HWWleptons_nobtag_cmva.gen_cc)"

            dy_bb_sample["json_skeleton"][dy_bb_sample["db_name"]] = newJson
            dy_bb_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(dy_bb_sample)

            # Z + jj' ; j = b/c, j' = l
            dy_bx_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            dy_bx_sample["db_name"] = sample["db_name"].replace("DYJetsToLL", "DYBXOrCXToLL")
            newJson["sample_cut"] = "(hh_llmetjj_HWWleptons_nobtag_cmva.gen_bl || hh_llmetjj_HWWleptons_nobtag_cmva.gen_cl || hh_llmetjj_HWWleptons_nobtag_cmva.gen_bc)"

            dy_bx_sample["json_skeleton"][dy_bx_sample["db_name"]] = newJson
            dy_bx_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(dy_bx_sample)

            # Z + jj' ; j = l, j' = l
            dy_xx_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            dy_xx_sample["db_name"] = sample["db_name"].replace("DYJetsToLL", "DYXXToLL")
            newJson["sample_cut"] = "(hh_llmetjj_HWWleptons_nobtag_cmva.gen_ll)"

            dy_xx_sample["json_skeleton"][dy_xx_sample["db_name"]] = newJson
            dy_xx_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(dy_xx_sample)

        # Merging with HT binned sample: add cut on inclusive one
        if ('DYJetsToLL_M-5to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8' in sample["db_name"]
                or 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8' in sample["db_name"]):
            sample["json_skeleton"][sample["db_name"]]["sample_cut"] = "event_ht < 100"

        #if 'WJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8' in sample["db_name"]:
        #    sample["json_skeleton"][sample["db_name"]]["sample_cut"] = "event_ht < 100"

    ## Write command and data files in the slurm directory
    mySub.createFiles()

    # Actually submit the jobs
    # It is recommended to do a dry-run first without submitting to slurm
    if args.submit:
        mySub.submit()
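
## Hypothetical call site, for illustration only (the output path and the
## executable name are placeholders, not taken from the original script):
# create_slurm(samples, "/path/to/output", "createHistoWithMultiDraw.exe")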
Code example #3
File: launchFactory.py  Project: tahuang1991/HHTools
def create_slurm(samples, output, executable):
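    ## Note: this excerpt relies on module-level objects of launchFactory.py
    ## (e.g. the `copy` module, the parsed `args`, `nonresonant_signal_grid`
    ## and `number_of_bases`) that are not shown here.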
    ## Create slurm submitter to handle job creation
    mySub = slurmSubmitter(samples, "%s/build/" % output + executable, output + "/", rescale=True, memory=2000, runtime="360")

    ## Create the output directory and subdirectories
    mySub.setupDirs()

    splitTT = True
    splitDY = False

    def get_node(db_name):
        split_name = db_name.split("_")
        node = None
        for i, it in enumerate(split_name):
            if it == "node":
                node = split_name[i+1]
                break
        if node is None:
            raise Exception("Could not extract node from DB name {}".format(db_name))
        return node

    def get_node_id(node):
        if node == "SM": return "-1"
        elif node == "box": return "0"
        else: return node

    ## Modify the input samples to add sample cuts and other per-sample options
    for sample in mySub.sampleCfg[:]:
        # TTbar final state splitting
        if splitTT and 'TT_TuneCUETP8M2T4_13TeV-powheg-pythia8' in sample["db_name"]:

            # Fully leptonic
            #tt_fl_sample = copy.deepcopy(sample)
            #newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            #tt_fl_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_FL_Tune")
            #newJson["sample_cut"] = "(hh_gen_ttbar_decay_type >= 4 && hh_gen_ttbar_decay_type <= 10 && hh_gen_ttbar_decay_type != 7)"

            #tt_fl_sample["json_skeleton"][tt_fl_sample["db_name"]] = newJson
            #tt_fl_sample["json_skeleton"].pop(sample["db_name"])
            #mySub.sampleCfg.append(tt_fl_sample)

            ## Semi leptonic
            #tt_sl_sample = copy.deepcopy(sample)
            #newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            #tt_sl_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_SL_Tune")
            #newJson["sample_cut"] = "(hh_gen_ttbar_decay_type == 2 || hh_gen_ttbar_decay_type == 3 || hh_gen_ttbar_decay_type == 7)"

            #tt_sl_sample["json_skeleton"][tt_sl_sample["db_name"]] = newJson
            #tt_sl_sample["json_skeleton"].pop(sample["db_name"])
            #mySub.sampleCfg.append(tt_sl_sample)

            ## Fully hadronic
            #tt_fh_sample = copy.deepcopy(sample)
            #newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            #tt_fh_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_FH_Tune")
            #newJson["sample_cut"] = "(hh_gen_ttbar_decay_type == 1)"

            #tt_fh_sample["json_skeleton"][tt_fh_sample["db_name"]] = newJson
            #tt_fh_sample["json_skeleton"].pop(sample["db_name"])
            #mySub.sampleCfg.append(tt_fh_sample)

            # Not fully leptonic
            tt_other_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            tt_other_sample["db_name"] = sample["db_name"].replace("TT_Tune", "TT_Other_Tune")
            newJson["sample_cut"] = "(hh_gen_ttbar_decay_type <= 3 || hh_gen_ttbar_decay_type == 7)"

            tt_other_sample["json_skeleton"][tt_other_sample["db_name"]] = newJson
            tt_other_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(tt_other_sample)

            mySub.sampleCfg.remove(sample)

        if splitDY and 'DYJetsToLL_' in sample["db_name"]:

            # Z + jj' ; j = b/c, j' = b/c
            dy_bb_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            dy_bb_sample["db_name"] = sample["db_name"].replace("DYJetsToLL", "DYBBOrCCToLL")
            newJson["sample_cut"] = "(hh_llmetjj_HWWleptons_nobtag_cmva.gen_bb || hh_llmetjj_HWWleptons_nobtag_cmva.gen_cc)"

            dy_bb_sample["json_skeleton"][dy_bb_sample["db_name"]] = newJson
            dy_bb_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(dy_bb_sample)

            # Z + jj' ; j = b/c, j' = l
            dy_bx_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            dy_bx_sample["db_name"] = sample["db_name"].replace("DYJetsToLL", "DYBXOrCXToLL")
            newJson["sample_cut"] = "(hh_llmetjj_HWWleptons_nobtag_cmva.gen_bl || hh_llmetjj_HWWleptons_nobtag_cmva.gen_cl || hh_llmetjj_HWWleptons_nobtag_cmva.gen_bc)"

            dy_bx_sample["json_skeleton"][dy_bx_sample["db_name"]] = newJson
            dy_bx_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(dy_bx_sample)

            # Z + jj' ; j = l, j' = l
            dy_xx_sample = copy.deepcopy(sample)
            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])

            dy_xx_sample["db_name"] = sample["db_name"].replace("DYJetsToLL", "DYXXToLL")
            newJson["sample_cut"] = "(hh_llmetjj_HWWleptons_nobtag_cmva.gen_ll)"

            dy_xx_sample["json_skeleton"][dy_xx_sample["db_name"]] = newJson
            dy_xx_sample["json_skeleton"].pop(sample["db_name"])
            mySub.sampleCfg.append(dy_xx_sample)

        # Merging with HT binned sample: add cut on inclusive one
        # if 'DYJetsToLL_M-5to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8' in sample["db_name"] or 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8' in sample["db_name"]:
            # sample["json_skeleton"][sample["db_name"]]["sample_cut"] = "event_ht < 100"

        #if 'WJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8' in sample["db_name"]:
        #    sample["json_skeleton"][sample["db_name"]]["sample_cut"] = "event_ht < 100"

        # Benchmark to training grid reweighting (ME-based)
        if "node" in sample["db_name"]:
            base = get_node(sample["db_name"])
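            ## For each (kl, kt) point in nonresonant_signal_grid, the loop
            ## below clones the sample, renames e.g. "..._node_SM_..." to
            ## "..._base_SM_point_1p00_1p00_..." ("." -> "p", "-" -> "m"),
            ## attaches the "training_grid" reweighting arguments, and finally
            ## removes the original node sample.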
            
            for grid_point in nonresonant_signal_grid:
                kl = "{:.2f}".format(grid_point[0])
                kt = "{:.2f}".format(grid_point[1])
                
                weight_args = [get_node_id(base), grid_point[0], grid_point[1], number_of_bases]

                newSample = copy.deepcopy(sample)
                newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])
            
                point_str = "base_" + base + "_point_" + kl + "_" + kt
                point_str = point_str.replace(".", "p").replace("-", "m")
                newSample["db_name"] = sample["db_name"].replace("node_" + base, point_str)
                newJson["sample-weight"] = "training_grid"
                newJson["sample-weight-args"] = weight_args
            
                newSample["json_skeleton"][newSample["db_name"]] = newJson
                newSample["json_skeleton"].pop(sample["db_name"])
                mySub.sampleCfg.append(newSample)

            mySub.sampleCfg.remove(sample)

        ## Cluster to MV reweighting (ME-based)
        #operators_MV = ["OtG", "Otphi", "O6", "OH"]
        #if "node" in sample["db_name"]:
        #    for base, base_name in enumerate(rwgt_base):
        #        for i, op1 in enumerate(operators_MV):
        #            newSample = copy.deepcopy(sample)
        #            newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])
        #
        #            newSample["db_name"] = sample["db_name"].replace("node_" + base_name, "SM_" + op1)
        #            newJson["sample-weight"] = "base_" + base_name + "_SM_" + op1
        #
        #            newSample["json_skeleton"][newSample["db_name"]] = newJson
        #            newSample["json_skeleton"].pop(sample["db_name"])
        #
        #            mySub.sampleCfg.append(newSample)
        #
        #            for j, op2 in enumerate(operators_MV):
        #                if i < j: continue
        #
        #                newSample = copy.deepcopy(sample)
        #                newJson = copy.deepcopy(sample["json_skeleton"][sample["db_name"]])
        #
        #                newSample["db_name"] = sample["db_name"].replace("node_" + base_name, op1 + "_" + op2)
        #                newJson["sample-weight"] = "base_" + base_name + "_" + op1 + "_" + op2
        #
        #                newSample["json_skeleton"][newSample["db_name"]] = newJson
        #                newSample["json_skeleton"].pop(sample["db_name"])
        #
        #                mySub.sampleCfg.append(newSample)

    ## Write command and data files in the slurm directory
    mySub.createFiles()

    # Actually submit the jobs
    # It is recommended to do a dry-run first without submitting to slurm
    if args.submit:
        mySub.submit()