def __init__(self,
                 year=2017,
                 sigma='central',
                 sample=None,
                 buggy=False,
                 flat=False):
        """Load data and MC pilup profiles."""

        assert (year
                in [2016, 2017,
                    2018]), "You must choose a year from: 2016, 2017, or 2018."
        assert (
            sigma in ['central', 'up', 'down']
        ), "You must choose a s.d. variation from: 'central', 'up', or 'down'."

        minbias = '69p2'
        if sigma == 'down':
            minbias = '66p0168'  # -4.6%
        elif sigma == 'up':
            minbias = '72p3832'  # +4.6%

        if year == 2016:
            datafilename = path + "Data_PileUp_2016_%s.root" % (minbias)
            mcfilename = path + "MC_PileUp_2016_Moriond17.root"
        elif year == 2017:
            tag = ""
            if buggy or sample:
                buggy = buggy or hasBuggyPU(sample)
                if buggy: tag = "_old_pmx"
                else: tag = "_new_pmx"
            datafilename = path + "Data_PileUp_2017_%s.root" % (minbias)
            mcfilename = path + "MC_PileUp_2017_Winter17_V2%s.root" % (tag)
        else:
            datafilename = path + "Data_PileUp_2018_%s.root" % (minbias)
            mcfilename = path + "MC_PileUp_2018_Autumn18.root"

        if flat or (sample and hasFlatPU(sample)):
            mcfilename = path + "MC_PileUp_%d_FlatPU0to75.root" % year

        print "Loading PileupWeightTool for '%s' and '%s'" % (datafilename,
                                                              mcfilename)
        self.datafile = ensureTFile(datafilename, 'READ')
        self.mcfile = ensureTFile(mcfilename, 'READ')
        self.datahist = self.datafile.Get('pileup')
        self.mchist = self.mcfile.Get('pileup')
        self.datahist.SetDirectory(0)
        self.mchist.SetDirectory(0)
        self.datahist.Scale(1. / self.datahist.Integral())
        self.mchist.Scale(1. / self.mchist.Integral())
        self.datafile.Close()
        self.mcfile.Close()
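# Usage sketch (assumption): the class name PileupWeightTool is taken from the
# print statement above; the getWeight accessor is hypothetical and only
# illustrates how the normalized data and MC 'pileup' histograms are typically
# turned into a per-event weight from the true number of pileup interactions.
puTool = PileupWeightTool(year=2018, sigma='up')
# weight = puTool.getWeight(event.Pileup_nTrueInt)  # hypothetical accessor: data/MC ratio at nTrueInt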
 def __init__(self, filename_1, filename_2, type, year):
     print '>>> ScaleFactor.init(file_1="%s", file_2="%s", type="%s")' % (filename_1, filename_2, type)
     self.type = type
     self.filename_1 = filename_1
     self.filename_2 = filename_2
     if type != 'selection':
         self.datafile = ensureTFile(filename_1)
         self.embfile = ensureTFile(filename_2)
         self.datahist = self.datafile.Get('{}_pt_eta_bins'.format(type))
         self.embhist = self.embfile.Get('{}_pt_eta_bins'.format(type))
     else:
         self.file_1 = ensureTFile(filename_1)
         self.file_2 = ensureTFile(filename_2)
         if year == 2018:
             self.file_1_hist = self.file_1.Get('Mu8_pt_eta_bins')
             self.file_2_hist = self.file_2.Get('Mu17_pt_eta_bins')
         elif year == 2017:
             self.file_1_hist = self.file_1.Get('data_trg_eff')
             self.file_2_hist = self.file_2.Get('data_trg_eff')
         elif year == 2016:
             self.file_1_hist = self.file_1.Get('Trg8_pt_eta_bins')
             self.file_2_hist = self.file_2.Get('Trg17_pt_eta_bins')
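# Usage sketch (assumption): this constructor dispatches on 'type'. A
# non-'selection' instance reads the '<type>_pt_eta_bins' histograms from both
# files, while a 'selection' instance reads the year-dependent trigger
# histograms. The file names below are hypothetical placeholders.
sf_id  = ScaleFactor('data_eff.root', 'emb_eff.root', 'id', 2018)         # reads 'id_pt_eta_bins' from both files
sf_sel = ScaleFactor('mu8_eff.root', 'mu17_eff.root', 'selection', 2018)  # reads the Mu8/Mu17 trigger histograms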
 def __init__(self, filename, graphname='ZMass', name="<noname>"):
     #print '>>> ScaleFactor.init("%s","%s",name="%s")'%(filename,graphname,name)
     self.name      = name
     self.filename  = filename
     self.file      = ensureTFile(filename)
     self.hist_eta  = self.file.Get('etaBinsH')
     self.hist_eta.SetDirectory(0)
     self.effs_data = { }
     self.effs_mc   = { }
     for ieta in range(1,self.hist_eta.GetXaxis().GetNbins()+1):
       etalabel = self.hist_eta.GetXaxis().GetBinLabel(ieta)
       self.effs_data[etalabel] = self.file.Get(graphname+etalabel+"_Data")
       self.effs_mc[etalabel]   = self.file.Get(graphname+etalabel+"_MC")
     self.file.Close()
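# Usage sketch (assumption): the constructor above stores per-eta-bin TGraphs
# keyed by the bin labels of 'etaBinsH', so an efficiency lookup typically maps
# |eta| to its bin label and interpolates the matching graphs versus pT. The
# file name and the lookup below are illustrative, not the class's own getSF.
sf = ScaleFactor('Muon_IdIso_2018.root', graphname='ZMass', name='idiso')  # hypothetical file name
pt, eta  = 35.0, 1.2  # example kinematics
label    = sf.hist_eta.GetXaxis().GetBinLabel(sf.hist_eta.GetXaxis().FindBin(abs(eta)))
eff_data = sf.effs_data[label].Eval(pt)  # TGraph interpolation versus pT
eff_mc   = sf.effs_mc[label].Eval(pt)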
 def __init__(self, filename, histname, name="<noname>", ptvseta=True):
     #print '>>> ScaleFactor.init("%s","%s",name="%s",ptvseta=%r)'%(filename,histname,name,ptvseta)
     self.name     = name
     self.ptvseta  = ptvseta
     self.filename = filename
     self.file     = ensureTFile(filename)
     self.hist     = self.file.Get(histname)
     if not self.hist:
       print '>>> ScaleFactor(%s).__init__: histogram "%s" does not exist in "%s"'%(self.name,histname,filename)
       exit(1)
     self.hist.SetDirectory(0)
     self.file.Close()
     
     if ptvseta: self.getSF = self.getSF_ptvseta
     else:       self.getSF = self.getSF_etavspt
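# Usage sketch (assumption): 'ptvseta' only selects which axis convention the
# stored TH2 uses; either way the lookup reduces to a 2D bin read. One plausible
# convention (eta on the x axis, pT on the y axis) is sketched below; the
# class's actual getSF_ptvseta and getSF_etavspt are not shown in this example.
def getSF_ptvseta_sketch(hist, pt, eta):
    xbin = hist.GetXaxis().FindBin(eta)  # assumed: eta on the x axis
    ybin = hist.GetYaxis().FindBin(pt)   # assumed: pT on the y axis
    return hist.GetBinContent(xbin, ybin)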
 def __init__(self, trigger, wp='medium', id='MVAv2', year=2016):
     """Load tau trigger histograms from files."""
     print "Loading TauTriggerSFs for %s (%s WP)..."%(trigger,wp)
     
     trigger = trigger.replace('tautau','ditau').replace('eletau','etau')
     assert(trigger in ['ditau', 'mutau', 'etau']), "Choose from: 'ditau', 'mutau', 'etau' triggers."
     assert(wp in ['vloose', 'loose', 'medium', 'tight', 'vtight', 'vvtight']), "You must choose a WP from: vloose, loose, medium, tight, vtight, or vvtight"
     assert(id in ['MVAv2', 'dR0p3']), "Choose from two provided ID types: 'MVAv2', 'dR0p3'. 'MVAv2' uses dR0p5, and 'dR0p3' is also an MVA-based ID."
     assert(id=='MVAv2'), "Tau POG is currently only providing efficiencies for MVAv2, sorry."
     assert(year in [2016,2017,2018]), "You must choose a year from: 2016, 2017, or 2018."
     print "Loading Efficiencies for trigger %s usingTau %s ID WP %s for year %i"%(trigger,id,wp,year)
     
     file = ensureTFile( path+'%d/tauTriggerEfficiencies%i.root'%(year,year), 'r' )
     
     self.fit_data = { }
     self.fit_mc   = { }
     self.fitUnc_data = { }
     self.fitUnc_mc   = { }
     self.effEtaPhi_data = { }
     self.effEtaPhi_mc   = { }
     self.effEtaPhiAvg_data = { }
     self.effEtaPhiAvg_mc   = { }
     
     # LOOP over decay modes
     for dm in [0,1,10]:
       
       # LOAD the TF1s containing the analytic best-fit results
       self.fit_data[dm] = extractTH1(file,'%s_%s%s_dm%d_DATA_fit'%(trigger,wp,id,dm))
       self.fit_mc[dm]   = extractTH1(file,'%s_%s%s_dm%d_MC_fit'%(  trigger,wp,id,dm))
       
       # LOAD the TH1s containing the analytic best-fit result in 1 GeV increments and the associated uncertainty
       self.fitUnc_data[dm] = extractTH1(file,'%s_%s%s_dm%d_DATA_errorBand'%(trigger,wp,id,dm))
       self.fitUnc_mc[dm]   = extractTH1(file,'%s_%s%s_dm%d_MC_errorBand'%(  trigger,wp,id,dm))
       
       # LOAD the TH2s containing the eta phi efficiency corrections
       self.effEtaPhi_data[dm] = extractTH1(file,'%s_%s%s_dm%d_DATA'%(trigger,wp,id,dm))
       self.effEtaPhi_mc[dm]   = extractTH1(file,'%s_%s%s_dm%d_MC'%(  trigger,wp,id,dm))
       
       # LOAD eta phi Averages
       self.effEtaPhiAvg_data[dm] = extractTH1(file,'%s_%s%s_dm%d_DATA_AVG'%(trigger,wp,id,dm))
       self.effEtaPhiAvg_mc[dm]   = extractTH1(file,'%s_%s%s_dm%d_MC_AVG'%(  trigger,wp,id,dm))
     
     file.Close()
     
     self.trigger = trigger
     self.year = year
     self.wp = wp
     self.id = id
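# Usage sketch (assumption): the per-decay-mode histograms loaded above are
# typically combined into a data/MC trigger scale factor at a given tau pT.
# The class's own SF/efficiency accessors are not part of this example; the
# direct histogram reads below are for illustration only.
sfTool   = TauTriggerSFs('ditau', wp='tight', id='MVAv2', year=2017)
dm, pt   = 1, 45.0  # example decay mode and tau pT
eff_data = sfTool.fitUnc_data[dm].GetBinContent(sfTool.fitUnc_data[dm].FindBin(pt))
eff_mc   = sfTool.fitUnc_mc[dm].GetBinContent(sfTool.fitUnc_mc[dm].FindBin(pt))
sf       = eff_data/eff_mc if eff_mc > 0 else 1.0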
    def __init__(self, year=2017):
        """Load Z pT weights."""
        assert year in [
            2016, 2017, 2018
        ], "ZptCorrectionTool: You must choose a year from: 2016, 2017, or 2018."

        if year == 2016:
            filename = zptpath + "Zpt_weights_2016.root"
        elif year == 2017:
            filename = zptpath + "Zpt_weights_2017.root"
        else:
            filename = zptpath + "Zpt_weights_2018.root"

        file = ensureTFile(filename, 'READ')
        hist = file.Get('zptmass_weights')
        hist.SetDirectory(0)
        file.Close()

        self.hist = hist
        self.filename = filename
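# Usage sketch (assumption): 'zptmass_weights' is read here as a 2D map, with
# the Z mass and Z pT on its two axes; the axis convention and a getZptWeight
# accessor are not shown in this example and are assumed for illustration.
zptTool    = ZptCorrectionTool(year=2017)
zmass, zpt = 91.2, 30.0  # example generator-level Z boson kinematics
xbin   = zptTool.hist.GetXaxis().FindBin(zmass)  # assumed: mass on the x axis
ybin   = zptTool.hist.GetYaxis().FindBin(zpt)    # assumed: pT on the y axis
weight = zptTool.hist.GetBinContent(xbin, ybin)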
    def __init__(self, year=2017, sigma='central', sample=None, buggy=False):
        """Load data and MC pilup profiles."""

        assert (year
                in [2016, 2017,
                    2018]), "You must choose a year from: 2016, 2017, or 2018."
        assert (
            sigma in ['central', 'up', 'down']
        ), "You must choose a s.d. variation from: 'central', 'up', or 'down'."

        minbias = '69p2'
        if sigma == 'down':
            minbias = '66p0168'  # -4.6%
        elif sigma == 'up':
            minbias = '72p3832'  # +4.6%

        if year == 2016:
            self.datafile = ensureTFile(
                path + 'Data_PileUp_2016_%s.root' % (minbias), 'READ')
            self.mcfile = ensureTFile(path + 'MC_PileUp_2016_Moriond17.root',
                                      'READ')
        elif year == 2017:
            tag = ""
            if buggy or sample:
                if buggy or hasBuggyPU(sample): tag = "_old_pmx"
                else: tag = "_new_pmx"
            self.datafile = ensureTFile(
                path + 'Data_PileUp_2017_%s.root' % (minbias), 'READ')
            self.mcfile = ensureTFile(
                path + 'MC_PileUp_2017_Winter17_V2%s.root' % (tag), 'READ')
        else:
            self.datafile = ensureTFile(
                path + 'Data_PileUp_2018_%s.root' % (minbias), 'READ')
            self.mcfile = ensureTFile(path + 'MC_PileUp_2018_Autumn18.root',
                                      'READ')
        self.datahist = self.datafile.Get('pileup')
        self.mchist = self.mcfile.Get('pileup')
        self.datahist.SetDirectory(0)
        self.mchist.SetDirectory(0)
        self.datahist.Scale(1. / self.datahist.Integral())
        self.mchist.Scale(1. / self.mchist.Integral())
        self.datafile.Close()
        self.mcfile.Close()
    def __init__(self,
                 tagger,
                 wp='medium',
                 sigma='central',
                 channel='mutau',
                 year=2017,
                 maxeta=2.4):
        """Load b tag weights from CSV file."""

        assert (year
                in [2016, 2017,
                    2018]), "You must choose a year from: 2016, 2017, or 2018."
        assert (tagger in [
            'CSVv2', 'DeepCSV'
        ]), "BTagWeightTool: You must choose a tagger from: CSVv2, DeepCSV!"
        assert (wp in [
            'loose', 'medium', 'tight'
        ]), "BTagWeightTool: You must choose a WP from: loose, medium, tight!"
        assert (sigma in [
            'central', 'up', 'down'
        ]), "BTagWeightTool: You must choose a WP from: central, up, down!"
        #assert(channel in ['mutau','eletau','tautau','mumu']), "BTagWeightTool: You must choose a channel from: mutau, eletau, tautau, mumu!"

        # FILE
        if year == 2016:
            if 'deep' in tagger.lower():
                csvname = path + 'DeepCSV_Moriond17_B_H.csv'
                effname = path + 'DeepCSV_2016_Moriond17_eff.root'
            else:
                csvname = path + 'CSVv2_Moriond17_B_H.csv'
                effname = path + 'CSVv2_2016_Moriond17_eff.root'
        elif year == 2017:
            if 'deep' in tagger.lower():
                csvname = path + 'DeepCSV_94XSF_V3_B_F.csv'
                effname = path + 'DeepCSV_2017_12Apr2017_eff.root'
            else:
                csvname = path + 'CSVv2_94XSF_V2_B_F.csv'
                effname = path + 'CSVv2_2017_12Apr2017_eff.root'
        elif year == 2018:
            if 'deep' in tagger.lower():
                csvname = path + 'DeepCSV_94XSF_V3_B_F.csv'
                effname = path + 'DeepCSV_2018_Autumn18_eff.root'
            else:
                csvname = path + 'CSVv2_94XSF_V2_B_F.csv'
                effname = path + 'CSVv2_2018_Autumn18_eff.root'

        # TAGGING WP
        self.wpname = wp
        self.wp = getattr(BTagWPs(tagger, year), wp)
        if 'deep' in tagger.lower():
            tagged = lambda e, i: e.Jet_btagDeepB[i] > self.wp
        else:
            tagged = lambda e, i: e.Jet_btagCSVV2[i] > self.wp

        # CSV READER
        print "Loading BTagWeightTool for %s (%s WP)..." % (tagger, wp)
        op = OP_LOOSE if wp == 'loose' else OP_MEDIUM if wp == 'medium' else OP_TIGHT if wp == 'tight' else OP_RESHAPING
        type_udsg = 'incl'
        type_bc = 'comb'  # 'mujets' for QCD; 'comb' for QCD+TT
        calib = BTagCalibration(tagger, csvname)
        reader = BTagCalibrationReader(op, sigma)
        reader.load(calib, FLAV_B, type_bc)
        reader.load(calib, FLAV_C, type_bc)
        reader.load(calib, FLAV_UDSG, type_udsg)

        # EFFICIENCIES
        hists = {}    # histograms to compute the b tagging efficiencies in MC
        effmaps = {}  # b tag efficiencies in MC to compute the b tagging weight for an event
        efffile = ensureTFile(effname)
        default = False
        if not efffile:
            warning(
                "File %s with efficiency histograms does not exist! Reverting to default efficiency histogram..."
                % (effname),
                title="BTagWeightTool")
            default = True
        for flavor in [0, 4, 5]:
            flavor = flavorToString(flavor)
            histname = "%s_%s_%s" % (tagger, flavor, wp)
            effname = "%s/eff_%s_%s_%s" % (channel, tagger, flavor, wp)
            hists[flavor] = createEfficienyMap(histname)  # numerator   = b tagged jets
            hists[flavor + '_all'] = createEfficienyMap(histname + '_all')  # denominator = all jets
            if efffile:
                effmaps[flavor] = efffile.Get(effname)
                if not effmaps[flavor]:
                    warning(
                        "histogram '%s' does not exist in %s! Reverting to default efficiency histogram..."
                        % (effname, efffile.GetName()),
                        title="BTagWeightTool")
                    default = True
                    effmaps[flavor] = createDefaultEfficiencyMap(
                        effname, flavor, wp)
            else:
                effmaps[flavor] = createDefaultEfficiencyMap(
                    effname, flavor, wp)
            effmaps[flavor].SetDirectory(0)
        efffile.Close()

        if default:
            warning("Made use of default efficiency histograms! The b tag weights from this module should be regarded as placeholders only,\n"+\
                    "and should NOT be used for analyses. B (mis)tag efficiencies in MC are analysis dependent. Please create your own\n"+\
                    "efficiency histogram with corrections/btag/getBTagEfficiencies.py after running all MC samples with BTagWeightTool.",title="BTagWeightTool")

        self.tagged = tagged
        self.calib = calib
        self.reader = reader
        self.hists = hists
        self.effmaps = effmaps
        self.maxeta = maxeta
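# Usage sketch (assumption, not the class's own method): the standard per-event
# b tag weight multiplies the per-jet scale factor for tagged jets and
# (1 - SF*eff)/(1 - eff) for untagged ones, with eff taken from the MC
# efficiency maps loaded above. The axis convention of the efficiency maps
# (pT on x, eta on y) and the use of Jet_hadronFlavour are assumptions here.
def btagEventWeightSketch(tool, event, jetIds):
    weight = 1.0
    for i in jetIds:
        pt, eta  = event.Jet_pt[i], event.Jet_eta[i]
        if abs(eta) > tool.maxeta: continue
        flav     = event.Jet_hadronFlavour[i]
        flavenum = {5: FLAV_B, 4: FLAV_C}.get(flav, FLAV_UDSG)
        effmap   = tool.effmaps[flavorToString(flav)]
        eff      = effmap.GetBinContent(effmap.GetXaxis().FindBin(pt),
                                        effmap.GetYaxis().FindBin(eta))
        sf       = tool.reader.eval_auto_bounds('central', flavenum, abs(eta), pt)
        if tool.tagged(event, i):
            weight *= sf
        elif eff < 1.0:
            weight *= (1.0 - sf*eff)/(1.0 - eff)
    return weight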