Example #1
import os

from topcoffea.modules.paths import topcoffea_path
# Note: make_jsons_for_dict_of_samples and the *_dict sample dictionaries come from the enclosing script; their imports are not shown in this excerpt.


def main():

    # Specify some output dirs
    out_dir_private_UL = os.path.join(topcoffea_path("json"),
                                      "signal_samples/private_UL/")
    out_dir_top19001_local = os.path.join(
        topcoffea_path("json"), "signal_samples/private_top19001_local")
    out_dir_central_UL = os.path.join(topcoffea_path("json"),
                                      "signal_samples/central_UL/")
    out_dir_central_2017 = os.path.join(topcoffea_path("json"),
                                        "signal_samples/central_2017/")

    # Private UL
    #make_jsons_for_dict_of_samples(private_UL17_dict,"/hadoop","2017",out_dir_private_UL)
    #make_jsons_for_dict_of_samples(private_UL18_dict,"/hadoop","2018",out_dir_private_UL)
    #make_jsons_for_dict_of_samples(private_UL16_dict,"/hadoop","2016",out_dir_private_UL)
    #make_jsons_for_dict_of_samples(private_UL16APV_dict,"/hadoop","2016APV",out_dir_private_UL) # Not sure what we need here for the year, can remake the JSONs later to update when we have SFs etc set up for 2016 stuff (right now I think it's mostly just 18)

    # TOP-19-001 ttll
    #make_jsons_for_dict_of_samples(private_2017_dict,"","2017",out_dir_top19001_local)

    # Central
    #make_jsons_for_dict_of_samples(central_2017_correctnPartonsInBorn_dict,"root://ndcms.crc.nd.edu/","2017",out_dir_central_2017,on_das=True)
    make_jsons_for_dict_of_samples(central_2017_dict,
                                   "root://ndcms.crc.nd.edu/",
                                   "2017",
                                   out_dir_central_2017,
                                   on_das=True)
Example #2
from coffea.btag_tools.btagscalefactor import BTagScaleFactor
from topcoffea.modules.paths import topcoffea_path


def GetBTagSF(eta, pt, flavor, year=2018, sys=0):

  # Efficiencies and SFs for UL only available for 2017 and 2018
  if   year == 2016: SFevaluatorBtag = BTagScaleFactor(topcoffea_path("data/btagSF/DeepFlav_2016.csv"),"MEDIUM")
  elif year == 2017: SFevaluatorBtag = BTagScaleFactor(topcoffea_path("data/btagSF/UL/DeepJet_UL17.csv"),"MEDIUM")
  elif year == 2018: SFevaluatorBtag = BTagScaleFactor(topcoffea_path("data/btagSF/UL/DeepJet_UL18.csv"),"MEDIUM")

  if   sys==0 : SF=SFevaluatorBtag.eval("central",flavor,eta,pt)
  elif sys==1 : SF=SFevaluatorBtag.eval("up",flavor,eta,pt)
  elif sys==-1: SF=SFevaluatorBtag.eval("down",flavor,eta,pt)

  return (SF)
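
A minimal usage sketch (not part of the original snippet), assuming flat numpy arrays of jet kinematics with hadronFlavour codes (5=b, 4=c, 0=light) and the b-tag SF CSV files present under topcoffea's data directory:

import numpy as np

jet_eta = np.array([0.5, 1.2, 2.1])
jet_pt = np.array([35.0, 80.0, 150.0])
jet_flav = np.array([5, 4, 0])  # hadronFlavour: 5=b, 4=c, 0=light

sf_central = GetBTagSF(jet_eta, jet_pt, jet_flav, year=2017, sys=0)
sf_up = GetBTagSF(jet_eta, jet_pt, jet_flav, year=2017, sys=1)
sf_down = GetBTagSF(jet_eta, jet_pt, jet_flav, year=2017, sys=-1)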
Example #3
import gzip
import pickle

from coffea import lookup_tools
from topcoffea.modules.paths import topcoffea_path


def GetMCeffFunc(WP='medium', flav='b', year=2018):
    pathToBtagMCeff = topcoffea_path('data/btagSF/UL/btagMCeff_%i.pkl.gz' %
                                     year)
    hists = {}
    with gzip.open(pathToBtagMCeff) as fin:
        hin = pickle.load(fin)
        for k in hin.keys():
            if k in hists: hists[k] += hin[k]
            else: hists[k] = hin[k]
    h = hists['jetptetaflav']
    hnum = h.integrate('WP', WP)
    hden = h.integrate('WP', 'all')
    getnum = lookup_tools.dense_lookup.dense_lookup(
        hnum.values(overflow='over')[()], [
            hnum.axis('pt').edges(),
            hnum.axis('abseta').edges(),
            hnum.axis('flav').edges()
        ])
    getden = lookup_tools.dense_lookup.dense_lookup(
        hden.values(overflow='over')[()], [
            hden.axis('pt').edges(),
            hden.axis('abseta').edges(),
            hden.axis('flav').edges()
        ])
    values = hnum.values(overflow='over')[()]
    edges = [
        hnum.axis('pt').edges(),
        hnum.axis('abseta').edges(),
        hnum.axis('flav').edges()
    ]
    fun = lambda pt, abseta, flav: getnum(pt, abseta, flav) / getden(pt, abseta, flav)
    return fun
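
A usage sketch that is not in the original; it assumes the pickled efficiency file data/btagSF/UL/btagMCeff_2018.pkl.gz is available. Note that the flav argument of GetMCeffFunc is not used inside the function body; the flavour dependence enters through the third lookup axis:

import numpy as np

GetBtagMCeff = GetMCeffFunc(WP='medium', year=2018)  # returns fun(pt, abseta, flav)

jet_pt = np.array([45.0, 120.0])
jet_abseta = np.array([0.8, 1.9])
jet_flav = np.array([5, 0])

eff = GetBtagMCeff(jet_pt, jet_abseta, jet_flav)  # per-jet MC tagging efficiency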
Example #4
import os

from topcoffea.modules.paths import topcoffea_path
# Note: make_jsons_for_dict_of_samples and the *_dict sample dictionaries come from the enclosing script; their imports are not shown in this excerpt.


def main():

    # Specify some output dirs
    out_dir_private_UL = os.path.join(topcoffea_path("json"),
                                      "signal_samples/private_UL/")
    out_dir_top19001_local = os.path.join(
        topcoffea_path("json"), "signal_samples/private_top19001_local")
    out_dir_central_UL = os.path.join(topcoffea_path("json"),
                                      "signal_samples/central_UL/")
    out_dir_central_2017 = os.path.join(topcoffea_path("json"),
                                        "signal_samples/central_2017/")

    # Private UL
    #make_jsons_for_dict_of_samples(private_UL17_dict,"/hadoop","2017",out_dir_private_UL)
    #make_jsons_for_dict_of_samples(private_UL18_dict,"/hadoop","2018",out_dir_private_UL)
    #make_jsons_for_dict_of_samples(private_UL16_dict,"/hadoop","2016",out_dir_private_UL)
    make_jsons_for_dict_of_samples(
        private_UL16APV_dict, "/hadoop", "2016APV", out_dir_private_UL
    )  # Not sure what we need here for the year, can remake the JSONs later to update when we have SFs etc set up for 2016 stuff (right now I think it's mostly just 18)
Example #5
    def __init__(self):

        # The path to the file listing the luminosity for each year
        self.LUMI_FILE = topcoffea_path("json/lumi.json")

        # The order of the categories in the TOP-19-001 AN yield tables
        self.CAT_LST = [
            "cat_2lss_p", "cat_2lss_m", "cat_3l_1b_offZ_p", "cat_3l_1b_offZ_m",
            "cat_3l_2b_offZ_p", "cat_3l_2b_offZ_m", "cat_3l_1b_onZ",
            "cat_3l_2b_onZ", "cat_4l"
        ]

        # A dictionary mapping names of samples in the samples axis to a short version of the name
        self.PROC_MAP = {
            "ttlnu": [
                "ttW_centralUL17", "ttlnu_private2017", "ttlnuJet_privateUL17",
                "ttlnuJet_privateUL18"
            ],
            "ttll": [
                "ttZ_centralUL17", "ttll_TOP-19-001", "ttllJet_privateUL17",
                "ttllJet_privateUL18"
            ],
            "ttH": [
                "ttH_centralUL17", "ttH_private2017", "ttHJet_privateUL17",
                "ttHJet_privateUL18"
            ],
            "tllq": [
                "tZq_centralUL17", "tllq_private2017", "tllq_privateUL17",
                "tllq_privateUL18"
            ],
            "tHq": ["tHq_central2017", "tHq_privateUL17"],
            "tttt": ["tttt_central2017", "tttt_privateUL17"],
        }

        # The jet bins we define for the lep categories
        self.JET_BINS = {
            "2lss": [4, 5, 6, 7],
            "3l": [2, 3, 4, 5],
            "4l": [2, 3, 4],
        }

        # The sub categories of the lep categories
        self.ch_3l_onZ = ["eemSSonZ", "mmeSSonZ", "eeeSSonZ", "mmmSSonZ"]
        self.ch_3l_offZ = ["eemSSoffZ", "mmeSSoffZ", "eeeSSoffZ", "mmmSSoffZ"]
        self.ch_2lss = ["eeSSonZ", "eeSSoffZ", "mmSSonZ", "mmSSoffZ", "emSS"]
        self.ch_4l = ["eeee", "eeem", "eemm", "mmme", "mmmm"]

        # A dictionary specifying which hist categories make up each analysis category
        self.CATEGORIES = {
            "cat_2lss_p": {
                "channel": self.ch_2lss,
                "sumcharge": ["ch+"],
                "cut": ["1+bm2+bl"],
            },
            "cat_2lss_m": {
                "channel": self.ch_2lss,
                "sumcharge": ["ch-"],
                "cut": ["1+bm2+bl"],
            },
            "cat_3l_1b_onZ": {
                "channel": self.ch_3l_onZ,
                "sumcharge": ["ch+", "ch-"],
                "cut": ["1bm"],
            },
            "cat_3l_1b_offZ_p": {
                "channel": self.ch_3l_offZ,
                "sumcharge": ["ch+"],
                "cut": ["1bm"],
            },
            "cat_3l_1b_offZ_m": {
                "channel": self.ch_3l_offZ,
                "sumcharge": ["ch-"],
                "cut": ["1bm"],
            },
            "cat_3l_2b_onZ": {
                "channel": self.ch_3l_onZ,
                "sumcharge": ["ch+", "ch-"],
                "cut": ["2+bm"],
            },
            "cat_3l_2b_offZ_p": {
                "channel": self.ch_3l_offZ,
                "sumcharge": ["ch+"],
                "cut": ["2+bm"],
            },
            "cat_3l_2b_offZ_m": {
                "channel": self.ch_3l_offZ,
                "sumcharge": ["ch-"],
                "cut": ["2+bm"],
            },
            "cat_4l": {
                "channel": self.ch_4l,
                "sumcharge": ["ch+", "ch-", "ch0"],
                "cut": ["1+bm2+bl"],
            },
        }

        # Yields from TOP-19-001 AN table 15
        self.TOP19001_YLDS = {
            "ttlnu": {
                "cat_2lss_p": (81.1, None),
                "cat_2lss_m": (44.0, None),
                "cat_3l_1b_offZ_p": (16.6, None),
                "cat_3l_1b_offZ_m": (9.1, None),
                "cat_3l_2b_offZ_p": (12.1, None),
                "cat_3l_2b_offZ_m": (6.7, None),
                "cat_3l_1b_onZ": (3.4, None),
                "cat_3l_2b_onZ": (2.5, None),
                "cat_4l": (0.0, None),
            },
            "ttll": {
                "cat_2lss_p": (22.6, None),
                "cat_2lss_m": (22.5, None),
                "cat_3l_1b_offZ_p": (14.2, None),
                "cat_3l_1b_offZ_m": (14.7, None),
                "cat_3l_2b_offZ_p": (10.1, None),
                "cat_3l_2b_offZ_m": (9.4, None),
                "cat_3l_1b_onZ": (106.5, None),
                "cat_3l_2b_onZ": (70.9, None),
                "cat_4l": (10.4, None),
            },
            "ttH": {
                "cat_2lss_p": (28.6, None),
                "cat_2lss_m": (27.9, None),
                "cat_3l_1b_offZ_p": (8.5, None),
                "cat_3l_1b_offZ_m": (8.1, None),
                "cat_3l_2b_offZ_p": (5.5, None),
                "cat_3l_2b_offZ_m": (5.6, None),
                "cat_3l_1b_onZ": (3.5, None),
                "cat_3l_2b_onZ": (2.4, None),
                "cat_4l": (1.1, None),
            },
            "tllq": {
                "cat_2lss_p": (2.9, None),
                "cat_2lss_m": (1.7, None),
                "cat_3l_1b_offZ_p": (3.8, None),
                "cat_3l_1b_offZ_m": (1.9, None),
                "cat_3l_2b_offZ_p": (1.3, None),
                "cat_3l_2b_offZ_m": (0.6, None),
                "cat_3l_1b_onZ": (42.1, None),
                "cat_3l_2b_onZ": (14.1, None),
                "cat_4l": (0.0, None),
            },
            "tHq": {
                "cat_2lss_p": (0.9, None),
                "cat_2lss_m": (0.5, None),
                "cat_3l_1b_offZ_p": (0.3, None),
                "cat_3l_1b_offZ_m": (0.2, None),
                "cat_3l_2b_offZ_p": (0.2, None),
                "cat_3l_2b_offZ_m": (0.1, None),
                "cat_3l_1b_onZ": (0.1, None),
                "cat_3l_2b_onZ": (0.1, None),
                "cat_4l": (0.0, None),
            },
        }
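
Not part of the original class: a minimal sketch of how these attributes might be consumed. The class name YieldTools is a hypothetical stand-in (the snippet only shows __init__), and my_yield is a placeholder for a yield computed elsewhere:

yt = YieldTools()

ref_yield, ref_err = yt.TOP19001_YLDS["ttll"]["cat_3l_1b_onZ"]  # (106.5, None)
selection = yt.CATEGORIES["cat_3l_1b_onZ"]  # channel, sumcharge, and cut lists for this category

my_yield = 101.3  # placeholder
pct_diff = 100.0 * (my_yield - ref_yield) / ref_yield
print(f"cat_3l_1b_onZ ttll: {pct_diff:+.1f}% relative to the TOP-19-001 AN yield")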
Example #6
'''
 This script transforms scale factors, which are typically provided as 2D histograms in ROOT files,
 into coffea-format corrections.
'''

#import uproot, uproot_methods
import uproot
from coffea import hist, lookup_tools
import os, sys
from topcoffea.modules.paths import topcoffea_path


def GetHistoFun(fname, hname):
    f = uproot.open(fname)
    h = f[hname]
    return lookup_tools.dense_lookup.dense_lookup(h.values, h.edges)


getMuonIso = GetHistoFun(topcoffea_path('data/scaleFactors/MuonISO.root'),
                         'NUM_TightRelIso_DEN_TightIDandIPCut_pt_abseta')
getMuonId = GetHistoFun(topcoffea_path('data/scaleFactors/MuonID.root'),
                        'NUM_TightID_DEN_genTracks_pt_abseta')
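
A usage sketch that is not in the original script; it assumes the lookups are evaluated as (pt, abseta), following the _pt_abseta naming of the histograms:

import numpy as np

mu_pt = np.array([25.0, 60.0])
mu_abseta = np.array([0.4, 2.1])

sf_iso = getMuonIso(mu_pt, mu_abseta)
sf_id = getMuonId(mu_pt, mu_abseta)
sf_muon = sf_iso * sf_id  # combined per-muon scale factor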
Example #7
  Returns a dictionary containing all the info for each sample

  Usage:
    >> python samples.py configFile.cfg

  Example of how to run this script in this repo: 
    >> python moca/samples.py cfg/2018.cfg

'''

import os, sys
from coffea.util import save
from topcoffea.modules.DASsearch import GetDatasetFromDAS
from topcoffea.modules.paths import topcoffea_path
from topcoffea.modules.fileReader import GetFiles, GetAllInfoFromFile
basepath = topcoffea_path("") # Just want path to topcoffea/topcoffea, not any particular file within it, so just pass "" to the function

def FindFileInDir(fname, dname = '.'):
  if not os.path.isfile(dname+'/'+fname):
    l = list(filter(lambda x: x[0] == fname, [x.split('.') for x in os.listdir(dname)]))
    if len(l) == 0: return False
    else          : l = l[0]
    fname = l[0] + '.' + l[1]
    return fname
  else: return dname+'/'+fname

def loadxsecdic(fname, verbose):
  xsecdir = {}
  dname = '.'
  filename = FindFileInDir(fname, dname)
  if not filename: filename = FindFileInDir(fname, basepath)
Example #8
import json

from topcoffea.modules.paths import topcoffea_path


def get_lumi(year):
    lumi_json = topcoffea_path("json/lumi.json")
    with open(lumi_json) as f_lumi:
        lumi = json.load(f_lumi)
        lumi = lumi[year]
    return lumi
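
A minimal usage sketch (not in the original), assuming json/lumi.json maps year strings to integrated luminosities; xsec and n_gen_events are placeholder numbers, and the units must match whatever lumi.json stores:

lumi = get_lumi("2017")

xsec = 0.2432
n_gen_events = 1_000_000
event_weight = xsec * lumi / n_gen_events  # typical per-event MC normalization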
Example #9
import gzip
import pickle
from coffea.jetmet_tools import FactorizedJetCorrector, JetCorrectionUncertainty
from coffea.jetmet_tools import JECStack, CorrectedJetsFactory
from coffea.btag_tools.btagscalefactor import BTagScaleFactor
from coffea import lookup_tools
from topcoffea.modules.paths import topcoffea_path

basepathFromTTH = 'data/fromTTH/lepSF/'

###### Lepton scale factors
################################################################
extLepSF = lookup_tools.extractor()

# Electron reco
extLepSF.add_weight_sets([
    "ElecRecoSFb20_2016 EGamma_SF2D %s" %
    topcoffea_path(basepathFromTTH +
                   'reco/elec/2016/el_scaleFactors_gsf_ptLt20.root')
])
extLepSF.add_weight_sets([
    "ElecRecoSF_2016 EGamma_SF2D %s" %
    topcoffea_path(basepathFromTTH +
                   'reco/elec/2016/el_scaleFactors_gsf_ptGt20.root')
])
extLepSF.add_weight_sets([
    "ElecRecoSFb20_2017 EGamma_SF2D %s" %
    topcoffea_path(basepathFromTTH +
                   'reco/elec/2017/el_scaleFactors_gsf_ptLt20.root')
])
extLepSF.add_weight_sets([
    "ElecRecoSF_2017 EGamma_SF2D %s" %
    topcoffea_path(basepathFromTTH +
                   'reco/elec/2017/el_scaleFactors_gsf_ptGt20.root')
])
Example #10
import numpy as np
import awkward as ak
import gzip
import pickle
from coffea.jetmet_tools import FactorizedJetCorrector, JetCorrectionUncertainty
from coffea.jetmet_tools import JECStack, CorrectedJetsFactory
from coffea.btag_tools.btagscalefactor import BTagScaleFactor
from coffea import lookup_tools
from topcoffea.modules.paths import topcoffea_path

basepathFromTTH = 'data/fromTTH/lepSF/'

###### Lepton scale factors
################################################################
extLepSF = lookup_tools.extractor()

# Electron reco
extLepSF.add_weight_sets(["ElecRecoSFb20_2016 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'reco/elec/2016/el_scaleFactors_gsf_ptLt20.root')])
extLepSF.add_weight_sets(["ElecRecoSF_2016 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'reco/elec/2016/el_scaleFactors_gsf_ptGt20.root')])
extLepSF.add_weight_sets(["ElecRecoSFb20_2017 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'reco/elec/2017/el_scaleFactors_gsf_ptLt20.root')])
extLepSF.add_weight_sets(["ElecRecoSF_2017 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'reco/elec/2017/el_scaleFactors_gsf_ptGt20.root')])
extLepSF.add_weight_sets(["ElecRecoSF_2018 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'reco/elec/2018/el_scaleFactors_gsf.root')])
extLepSF.add_weight_sets(["ElecRecoSFb20_2016_er EGamma_SF2D_error %s"%topcoffea_path(basepathFromTTH+'reco/elec/2016/el_scaleFactors_gsf_ptLt20.root')])
extLepSF.add_weight_sets(["ElecRecoSF_2016_er EGamma_SF2D_error %s"%topcoffea_path(basepathFromTTH+'reco/elec/2016/el_scaleFactors_gsf_ptGt20.root')])
extLepSF.add_weight_sets(["ElecRecoSFb20_2017_er EGamma_SF2D_error %s"%topcoffea_path(basepathFromTTH+'reco/elec/2017/el_scaleFactors_gsf_ptLt20.root')])
extLepSF.add_weight_sets(["ElecRecoSF_2017_er EGamma_SF2D_error %s"%topcoffea_path(basepathFromTTH+'reco/elec/2017/el_scaleFactors_gsf_ptGt20.root')])
extLepSF.add_weight_sets(["ElecRecoSF_2018_er EGamma_SF2D_error %s"%topcoffea_path(basepathFromTTH+'reco/elec/2018/el_scaleFactors_gsf.root')])

# Electron loose
extLepSF.add_weight_sets(["ElecLooseSF_2016 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'loose/elec/TnP_loose_ele_2016.root')])
extLepSF.add_weight_sets(["ElecLooseSF_2017 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'loose/elec/TnP_loose_ele_2017.root')])
extLepSF.add_weight_sets(["ElecLooseSF_2018 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'loose/elec/TnP_loose_ele_2018.root')])
extLepSF.add_weight_sets(["ElecLoosettHSF_2016 EGamma_SF2D %s"%topcoffea_path(basepathFromTTH+'loose/elec/TnP_loosettH_ele_2016.root')])