def test_merge_sample_sum(self):
    """
    Merge two samples with the sum option true. In this case timestamps do
    not play any role.
    """
    this = TQSampleFolder("this")
    s1 = TQSample("htautau")
    s1.addObject(TQCounter("c", 1.234, 35.1, 1234))
    s1.setTag("test.s1", "hello world")
    this.addSampleFolder(s1)

    other = TQSampleFolder("other")
    s2 = TQSample("htautau")
    s2.addObject(TQCounter("c", 4.321, 65.7, 4321))
    s2.setTag("test.s2", "hello multiverse")
    other.addSampleFolder(s2)

    this.merge(other, "asv", True)

    S = this.getSample("htautau")
    self.assertTrue(S)
    self.assertIsInstance(S, TQSample)
    self.assertEqual(
        repr(S),
        'TQSample("htautau") @ this:/htautau test.s1 = "hello world", test.s2 = "hello multiverse"')

    C = S.getObject("c")
    self.assertTrue(C)
    self.assertIsInstance(C, TQCounter)
    self.assertEqual(C.getCounter(), 5.555)
    self.assertEqual(C.getRawCounter(), 5555)
    self.assertEqual(C.getError(), 74.48825410761083)
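    # Note on the expected values above (a sketch of the arithmetic, not part
    # of the framework API): with the sum option the counters are added, so
    # the merged value is 1.234 + 4.321 = 5.555, the raw count is
    # 1234 + 4321 = 5555, and the error is the quadrature sum of the two
    # counter errors:
    #
    #   (35.1**2 + 65.7**2) ** 0.5   # == 74.48825410761083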
def test_merge_sample_nosum(self):
    """
    Merge two samples without the sum option. This means merging should not
    take place at all, since there are no timestamps.
    """
    this = TQSampleFolder("this")
    s1 = TQSample("htautau")
    s1.addObject(TQCounter("c", 1.234, 35.1, 1234))
    s1.setTag("test.s1", "hello world")
    this.addSampleFolder(s1)

    other = TQSampleFolder("other")
    s2 = TQSample("htautau")
    s2.addObject(TQCounter("c", 4.321, 65.7, 4321))
    s2.setTag("test.s2", "hello multiverse")
    other.addSampleFolder(s2)

    this.merge(other, False)

    S = this.getSample("htautau")
    self.assertTrue(S)
    self.assertIsInstance(S, TQSample)

    C = S.getObject("c")
    self.assertTrue(C)
    self.assertIsInstance(C, TQCounter)
    self.assertEqual(C.getCounter(), 1.234)
    self.assertEqual(C.getRawCounter(), 1234)
    self.assertEqual(C.getError(), 35.1)
def test_merge_sample_other_newer_nosum(self):
    """
    Merge two samples without the sum option, but with the other sample
    folder being newer. This means the other sample's contents should be
    taken.
    """
    this = TQSampleFolder("this")
    s1 = TQSample("htautau")
    s1.addObject(TQCounter("c", 1.234, 35.1, 1234))
    s1.setTag("test.s1", "hello world")
    s1.setTag(".test.timestamp.machine", self.YESTERDAY)
    this.addSampleFolder(s1)

    other = TQSampleFolder("other")
    s2 = TQSample("htautau")
    s2.addObject(TQCounter("c", 4.321, 65.7, 4321))
    s2.setTag("test.s2", "hello multiverse")
    s2.setTag(".test.timestamp.machine", self.TODAY)
    other.addSampleFolder(s2)

    this.merge(other, "test", False)

    S = this.getSample("htautau")
    self.assertTrue(S)
    self.assertIsInstance(S, TQSample)

    C = S.getObject("c")
    self.assertTrue(C)
    self.assertIsInstance(C, TQCounter)
    self.assertEqual(C.getCounter(), 4.321)
    self.assertEqual(C.getRawCounter(), 4321)
    self.assertEqual(C.getError(), 65.7)
def test_number_parsing_w_space(self):
    samples = TQSampleFolder("samples")
    parser = TQXSecParser(samples)
    parser.readCSVfile(os.path.join(self.tempdir, "testXS.csv"))
    parser.readMappingFromColumn("*path*")
    unit = TQXSecParser.unitName(TQXSecParser.unit("pb"))
    parser.setTagString("xSectionUnit", unit)
    parser.addPathVariant("channel", "em")
    parser.addAllSamples(False)

    vbf = samples.getSampleFolder("sig/em/vbf/341080")
    self.assertTrue(vbf, msg="Failed to get vbf sample folder")
    xsVBF = vbf.getTagDoubleDefault(".xsp.xSection", -999.)
    self.assertAlmostEqual(xsVBF, 0.8579E-4, delta=1e-10)
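    # For reference, a minimal "testXS.csv" fixture consistent with the test
    # above might look like the lines below (column names, layout, and the
    # exact position of the stray whitespace inside the cross-section number
    # are assumptions, not the actual fixture; the point of the test is that
    # the parser still reads the number correctly despite the space):
    #
    #   sampleName, path,                      xsection [pb]
    #   vbf341080,  sig/$(channel)/vbf/341080, 0.85 79E-4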
def test_simple_mapping(self):
    histogram = os.path.join(self.tempdir, "testHistogram.root:mapping")
    samples = TQSampleFolder("samples")
    testSample = samples.getSampleFolder("test+").getSample("testSample+")
    testSample.setTreeLocation(
        os.path.join(self.tempdir, "test.root:testTree"))
    testSample.setTagBool("usemcweights", True)

    # create the test cuts: a dummy base cut plus a cut whose weight is the
    # TH1Map histogram lookup of [testVal]
    baseCutFolder = TQFolder("cuts")
    cutText = '+baseCut{<.cutExpression="1.",.weightExpression="1.",title="dummy base cut">\n+weightedCut{<.cutExpression="1.",.weightExpression="TH1Map:' + histogram + '([testVal])">}\n}'
    baseCutFolder.importFromText(cutText)
    baseCut = TQCut.importFromFolder(baseCutFolder.getFolder("?"))

    # create the sample visitor
    visitor = TQAnalysisSampleVisitor()
    visitor.setBaseCut(baseCut)
    #visitor.setVerbose(True)

    # create the histomaker analysis job
    histoJob = TQHistoMakerAnalysisJob()
    if not histoJob.bookHistogram(
            'TH1F("histo","",20,-2.,2.) << (testVal:"original distribution")'):
        print("Failed to book histogram")
    baseCut.addAnalysisJob(histoJob, "*")
    samples.visitMe(visitor)
    #samples.writeToFile("testSampleFolder.root")

    original = samples.getHistogram("test", "baseCut/histo")
    scaled = samples.getHistogram("test", "weightedCut/histo")
    ok = True
    for b in range(0, original.GetNbinsX()):
        exponent = -1 if original.GetBinCenter(b) < 0 else 1
        # check that removing the scaling yields the same bin value
        ok = ok and (abs(
            original.GetBinContent(b) -
            scaled.GetBinContent(b) / pow(2, exponent)) < 1e-10)
    # don't check for an exact match, we might have some numerical discrepancies
    self.assertTrue(ok)
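    # A minimal sketch (assumed, not the actual fixture created in the test
    # setup) of how the "mapping" histogram used above as a TH1Map weight
    # could be built: events with testVal < 0 get weight 2**-1 and events
    # with testVal > 0 get weight 2**+1, which is exactly what the
    # bin-by-bin check above undoes.
    #
    #   import ROOT
    #   mapping = ROOT.TH1F("mapping", "", 2, -2., 2.)
    #   mapping.SetBinContent(1, 0.5)   # testVal < 0  ->  weight 1/2
    #   mapping.SetBinContent(2, 2.0)   # testVal > 0  ->  weight 2
    #   outfile = ROOT.TFile("testHistogram.root", "RECREATE")
    #   mapping.Write()
    #   outfile.Close()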
if len(sys.argv) < 2:
    print "Usage:"
    print "  python {} INDEX [SAMPLE]".format(sys.argv[0])
    print ""
    print "  INDEX determines which variation to run"
    print "    -1 : print cuts and samples and exit"
    print "     0 : nominal"
    print "     1 : jec_up"
    print "     2 : jec_dn"
    print "     3 : gen_met"
    print ""
    sys.exit()

# Create the master TQSampleFolder
samples = TQSampleFolder("samples")

# Connect input baby ntuple
connectNtuples(samples, "../samples.cfg", ntuplepath, "<-2", "<-3")
samples.printContents("trd")

# Add BSM samples
addBSMsamples(samples)

if len(sys.argv) >= 3:
    # Run single job
    main(samples, str(sys.argv[2]), [int(sys.argv[1])])
else:
    # First remove old files
    os.system("rm -f .output_-*")
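    # Example invocations (illustrative only; the script name and the "www"
    # sample name are placeholders, the actual names come from the sample
    # configuration):
    #   python thisscript.py -1        # print cuts and samples, then exit
    #   python thisscript.py 0         # run the nominal variation over all samples
    #   python thisscript.py 1 www     # run the jec_up variation for a single sample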
def main():
    options = {
        # The name of the master (root) TQSampleFolder
        "master_sample_name": "samples",
        # Where the ntuples are located
        "ntuple_path": "/nfs-7/userdata/phchang/WWW_babies/WWW_v1.2.3/skim/",
        # Path to the config file that defines how the samples should be organized
        "sample_config_path": "samples.cfg",
        # Samples whose "priority" (defined in sample_config_path) satisfies this condition are looped over
        "priority_value": ">0",
        # Samples whose "priority" (defined in sample_config_path) satisfies this condition are NOT looped over
        "exclude_priority_value": "<-1",
        # Number of cores
        "ncore": 4,
        # TQCuts config file
        "cuts": "cuts.cfg",
        # Histogram config file
        "histo": "histo.cfg",
        # Eventlist config file
        "eventlist": "eventlist.cfg",
        # Custom observables (dictionary)
        "customobservables": {},
        # Output directory
        "output_dir": "outputs/"
    }

    # First generate the cuts.cfg file
    generate_cuts_config()

    # Analyze
    loop(options)

    # Create plots and tables
    samples = TQSampleFolder.loadSampleFolder("outputs/output.root:samples")
    bkg_path = [
        ("WWW", "/sig/www"),
        ("WHWWW", "/sig/whwww"),
    ]
    sig_path = [("WZ", "/bkg/WZ")]
    autoplot(samples,
             bkg_path=bkg_path,
             sig_path=sig_path,
             options={
                 "remove_underflow": True,
                 "signal_scale": "auto"
             })
    autotable(samples,
              "yield",
              bkg_path=bkg_path,
              sig_path=sig_path,
              options={"cuts": "cuts.cfg"})
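# The bkg_path/sig_path entries above are (legend label, TQSampleFolder path)
# pairs; the path selects the folder inside the loaded "samples" object from
# which yields and histograms are retrieved. A minimal entry point for this
# script would simply be (an assumption, the original wiring is not shown
# here):
#
#   if __name__ == "__main__":
#       main()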
def compute_fake_factor_1d(th1):
    # Convert a fake-rate histogram f into a fake-factor histogram f/(1-f),
    # bin by bin and in place (including under/overflow)
    for ix in xrange(0, th1.GetNbinsX() + 2):
        frnom = th1.GetBinContent(ix)
        frerr = th1.GetBinError(ix)
        fr = E(frnom, frerr)
        if fr.val != 0 and fr.val != 1:
            ff = fr / (E(1., 0.) - fr)
        else:
            ff = E(0., 0.)
        th1.SetBinContent(ix, ff.val)
        th1.SetBinError(ix, ff.err)


ROOT.gROOT.SetBatch(True)

samples = TQSampleFolder.loadSampleFolder("output.root:samples")

qcdloosemu = samples.getHistogram("/qcd/mu",
                                  "OneMuLoose/lep_ptcorrcoarse_vs_etacoarse")
qcdtightmu = samples.getHistogram("/qcd/mu",
                                  "OneMuTight/lep_ptcorrcoarse_vs_etacoarse")
qcdloosemu.Print("all")
qcdtightmu.Print("all")

qcdlooseel = samples.getHistogram("/qcd/el",
                                  "OneElLoose/lep_ptcorrcoarse_vs_etacoarse")
qcdtightel = samples.getHistogram("/qcd/el",
                                  "OneElTight/lep_ptcorrcoarse_vs_etacoarse")

qcdlooseelEM = samples.getHistogram(
    "/qcd/el/EM", "OneElLoose/lep_ptcorrcoarse_vs_etacoarse")
#!/bin/env python

import os
import sys
import ROOT
from QFramework import TQSampleFolder, TQXSecParser, TQCut, TQAnalysisSampleVisitor, TQSampleInitializer, TQCutflowAnalysisJob, TQCutflowPrinter, TQHistoMakerAnalysisJob
from rooutil import plottery_wrapper as p
from plottery import plottery as ply

try:
    tqsamplefolderpath = sys.argv[1]
except IndexError:
    tqsamplefolderpath = "output.root"

ROOT.gROOT.SetBatch(True)

samples = TQSampleFolder.loadSampleFolder(tqsamplefolderpath + ":samples")
samples_up = TQSampleFolder.loadSampleFolder("output_up.root:samples")
samples_dn = TQSampleFolder.loadSampleFolder("output_dn.root:samples")

output_plot_dir = "plots"

doW = False
docombinedqcdel = True

testsample = "/top"
testsamplename = "t#bar{t}"
testsamplelegendname = "t#bar{t}"
if doW:
    testsample = "/W/HT"
    #testsample = "/W"
    testsamplename = "W"
#!/bin/env python

import os
import sys
import ROOT
from QFramework import TQSampleFolder, TQXSecParser, TQCut, TQAnalysisSampleVisitor, TQSampleInitializer, TQCutflowAnalysisJob, TQCutflowPrinter, TQHistoMakerAnalysisJob
from rooutil import plottery_wrapper as p
from plottery import plottery as ply
from rooutil.syncfiles.pyfiles.errors import E

ROOT.gROOT.SetBatch(True)

samples_cn = TQSampleFolder.loadSampleFolder("output.root:samples")
samples_up = TQSampleFolder.loadSampleFolder("output_up.root:samples")
samples_dn = TQSampleFolder.loadSampleFolder("output_dn.root:samples")

output_plot_dir = "plots"

#_________________________________________________________
def compute_fake_factor_1d(th1):
    # Convert a fake-rate histogram f into a fake-factor histogram f/(1-f),
    # bin by bin and in place (including under/overflow)
    for ix in xrange(0, th1.GetNbinsX()+2):
        frnom = th1.GetBinContent(ix)
        frerr = th1.GetBinError(ix)
        fr = E(frnom, frerr)
        if fr.val != 0 and fr.val != 1:
            ff = fr / (E(1., 0.) - fr)
        else:
            ff = E(0., 0.)
        th1.SetBinContent(ix, ff.val)
        th1.SetBinError(ix, ff.err)

#___________________________________________________________________________
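# Aside (a sketch of the arithmetic, not part of this script): for a fake rate
# f with uncertainty sigma_f, full error propagation of the fake factor
# f/(1-f) gives sigma_ff = sigma_f / (1-f)**2, since numerator and denominator
# are fully correlated. Whether the E(value, error) helper above accounts for
# that correlation depends on its implementation; the analytic formula is:
#
#   f, sigma_f = 0.20, 0.02
#   ff = f / (1.0 - f)                  # 0.25
#   sigma_ff = sigma_f / (1.0 - f)**2   # 0.03125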
def main(model="", mass=""): samples = TQSampleFolder.loadSampleFolder("outputs/output.root:samples") samples_jec_up = TQSampleFolder.loadSampleFolder( "outputs/output_jec_up.root:samples") samples_jec_dn = TQSampleFolder.loadSampleFolder( "outputs/output_jec_dn.root:samples") options = { # Signal name and TQSampleFolder path "sig": ("www", "/sig" if model == "" else "/bsm/{}/{}".format(model, mass)), # Background names and TQSampelFolder paths "bkgs": [ ("lostlep", "/typebkg/lostlep/[ttZ+WZ+Other]"), ("fake", "/fake"), ("vbsww", "/typebkg/?/VBSWW"), ("ttw", "/typebkg/?/ttW"), ("photon", "/typebkg/photon/[ttZ+WZ+Other]"), ("qflip", "/typebkg/qflip/[ttZ+WZ+Other]"), ("prompt", "/typebkg/prompt/[ttZ+WZ+Other]" if model == "" else "/typebkg/prompt/[ttZ+WZ+Other]+sig"), ], # Data TQSampleFolder paths "data": "/data", # Counter names for getting yields "bins": [ "SRSSeeFull", "SRSSemFull", "SRSSmmFull", "SideSSeeFull", "SideSSemFull", "SideSSmmFull", "SR0SFOSFull", "SR1SFOSFull", "SR2SFOSFull", #"SRNj1SSeeFull", #"SRNj1SSemFull", #"SRNj1SSmmFull", ], # TQSampleFolder object "nominal_sample": samples, # Control regions # The control regions will normalize the counts # The systematics "control_regions": { ("SRSSeeFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCRSSeeFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), ("SRSSemFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCRSSemFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), ("SRSSmmFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCRSSmmFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), ("SideSSeeFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCRSSeeFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), ("SideSSemFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCRSSemFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), ("SideSSmmFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCRSSmmFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), ("SR1SFOSFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCR1SFOSFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), ("SR2SFOSFull", "/typebkg/lostlep/[ttZ+WZ+Other]"): ("WZCR2SFOSFull", "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig" ), #("SRNj1SSeeFull", "/typebkg/lostlep/[ttZ+WZ+Other]") : ("WZCRNj1SSeeFull" , "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig"), #("SRNj1SSemFull", "/typebkg/lostlep/[ttZ+WZ+Other]") : ("WZCRNj1SSemFull" , "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig"), #("SRNj1SSmmFull", "/typebkg/lostlep/[ttZ+WZ+Other]") : ("WZCRNj1SSmmFull" , "/data-typebkg/qflip-typebkg/photon-typebkg/prompt-typebkg/fakes-typebkg/lostlep/VBSWW-typebkg/lostlep/ttW-sig"), }, # Weight variation systematics that are saved in the "nominal_sample" TQSampleFolder # The nomenclature of the coutner names must be <BIN_COUNTER><SYSTS>Up and <BIN_COUNTER><SYSTS>Down # The keyword are the systematics and then the items list the processes to apply the systematics 
"systematics": [ ("LepSF", { "procs_to_apply": [ "www", "vbsww", "ttw", "photon", "qflip", "prompt", "lostlep" ] }), ("TrigSF", { "procs_to_apply": [ "www", "vbsww", "ttw", "photon", "qflip", "prompt", "lostlep" ] }), ("BTagLF", { "procs_to_apply": [ "www", "vbsww", "ttw", "photon", "qflip", "prompt", "lostlep" ] }), ("BTagHF", { "procs_to_apply": [ "www", "vbsww", "ttw", "photon", "qflip", "prompt", "lostlep" ] }), ("Pileup", { "procs_to_apply": [ "www", "vbsww", "ttw", "photon", "qflip", "prompt", "lostlep" ] }), ("FakeRateEl", { "procs_to_apply": ["fake"] }), ("FakeRateMu", { "procs_to_apply": ["fake"] }), ("FakeClosureEl", { "procs_to_apply": ["fake"] }), ("FakeClosureMu", { "procs_to_apply": ["fake"] }), ("PDF", { "procs_to_apply": ["www"] }), ("AlphaS", { "procs_to_apply": ["www"] }), ("Qsq", { "procs_to_apply": ["www"] }), ("JEC", { "procs_to_apply": [ "www", "vbsww", "ttw", "photon", "qflip", "prompt", "lostlep" ], "syst_samples": { "Up": samples_jec_up, "Down": samples_jec_dn } }), ], "statistical": ["www", "vbsww", "ttw", "photon", "qflip", "prompt", "fake"], "flat_systematics": [ ("VBSWWXSec", ["vbsww"], "1.2", ""), ("ttWXSec", ["ttw"], "1.2", ""), ("VBSWWVRSyst", ["vbsww"], "1.22", ""), ("ttWVRSyst", ["ttw"], "1.18", ""), ("QFlipVRSyst", ["qflip"], "1.5", ""), ("PhotonVRSyst", ["photon"], "1.5", ""), ("LostLepMll3LModeling", ["lostlep"], "1.082", "SFOS"), ("LostLepMllSSModeling", ["lostlep"], "1.053", "SS"), ("LostLepMjjModeling", ["lostlep"], "1.049", "SS"), ("LumSyst", ["vbsww", "ttw", "photon", "qflip", "prompt", "www"], "1.025", ""), ], } return qutils.make_counting_experiment_statistics_data_card(options)
def main(index, mode, donotrun): # Determine JEC mode jecvar = "" if mode == 1: jecvar = "_up" if mode == 2: jecvar = "_dn" # # # Create the master TQSampleFolder # # samples = TQSampleFolder("samples") # # # Connect input baby ntuple # # connectNtuples(samples, samplescfgpath, nfspath, ">4", ">5") # # # Define cuts # # PreselCuts = [ ["1", "{$(usefakeweight)?1.:evt_scale1fb*35.9}"], ["1", "{$(usefakeweight)?1.:purewgt}"], ["Flag_AllEventFilters", "1"], ["nj30>=1", "1"], ["firstgoodvertex==0", "1"], ["evt_passgoodrunlist", "1"], ["mc_HLT_SingleIsoMu17+mc_HLT_SingleIsoEl17", "1"], ] PreselCutExpr, PreselWgtExpr = combexpr(PreselCuts) # Complicated string construction for looes and tight ID muon mu_loosetemp = "(TMath::Abs(lep_eta[{idx}]<2.4))*(abs(lep_dz[{idx}])<0.1)*(abs(lep_dxy[{idx}])<0.05)*(abs(lep_ip3d[{idx}])<0.015)*(abs(lep_ip3derr[{idx}]/lep_ip3d[{idx}])<4.)*(abs(lep_pterr[{idx}]/lep_trk_pt[{idx}])<0.2)*(lep_isMediumPOG[{idx}])*(lep_relIso03EAv2Lep[{idx}]<0.4)*(lep_pt[{idx}]>20.)" mu_tighttemp = "({loose})*(lep_relIso03EAv2Lep[{idx}]<0.03)".format( loose=mu_loosetemp, idx="{idx}") leadmu_loose = mu_loosetemp.format(idx="0") leadmu_tight = mu_tighttemp.format(idx="0") trailmu_loose = mu_loosetemp.format(idx="1") trailmu_tight = mu_tighttemp.format(idx="1") bothmu_loose = "({})&&({})".format(leadmu_loose, trailmu_loose) bothmu_tight = "({})&&({})".format(leadmu_tight, trailmu_tight) # Complicated string construction for looes and tight ID muon mu3l_loosetemp = "(TMath::Abs(lep_eta[{idx}]<2.4))*(abs(lep_dz[{idx}])<0.1)*(abs(lep_dxy[{idx}])<0.05)*(abs(lep_ip3d[{idx}])<0.015)*(abs(lep_ip3derr[{idx}]/lep_ip3d[{idx}])<4.)*(abs(lep_pterr[{idx}]/lep_trk_pt[{idx}])<0.2)*(lep_isMediumPOG[{idx}])*(lep_relIso03EAv2Lep[{idx}]<0.4)*(lep_pt[{idx}]>20.)" mu3l_tighttemp = "({loose})*(lep_relIso03EAv2Lep[{idx}]<0.07)".format( loose=mu3l_loosetemp, idx="{idx}") leadmu3l_loose = mu3l_loosetemp.format(idx="0") leadmu3l_tight = mu3l_tighttemp.format(idx="0") trailmu3l_loose = mu3l_loosetemp.format(idx="1") trailmu3l_tight = mu3l_tighttemp.format(idx="1") bothmu3l_loose = "({})&&({})".format(leadmu3l_loose, trailmu3l_loose) bothmu3l_tight = "({})&&({})".format(leadmu3l_tight, trailmu3l_tight) # Complicated string construction for looes and tight ID electron el_loosetemp = "(TMath::Abs(lep_eta[{idx}]<2.4))*(abs(lep_dz[{idx}])<0.1)*(abs(lep_dxy[{idx}])<0.05)*(abs(lep_ip3d[{idx}])<0.01)*(lep_tightCharge[{idx}]==2)*((abs(lep_etaSC[{idx}])<=1.479)*(lep_MVA[{idx}]>0.941)+(abs(lep_etaSC[{idx}])>1.479)*(lep_MVA[{idx}]>0.925))*(lep_isTriggerSafe_v1[{idx}])*(lep_relIso03EAv2Lep[{idx}]<0.4)*(lep_pt[{idx}]>20.)" el_tighttemp = "({loose})*(lep_relIso03EAv2Lep[{idx}]<0.03)".format( loose=el_loosetemp, idx="{idx}") leadel_loose = el_loosetemp.format(idx="0") leadel_tight = el_tighttemp.format(idx="0") trailel_loose = el_loosetemp.format(idx="1") trailel_tight = el_tighttemp.format(idx="1") bothel_loose = "({})&&({})".format(leadel_loose, trailel_loose) bothel_tight = "({})&&({})".format(leadel_tight, trailel_tight) # Complicated string construction for looes and tight ID electron for three lepton region el3l_loosetemp = "(TMath::Abs(lep_eta[{idx}]<2.4))*(abs(lep_dz[{idx}])<0.1)*(abs(lep_dxy[{idx}])<0.05)*(abs(lep_ip3d[{idx}])<0.015)*((abs(lep_etaSC[{idx}])<=1.479)*(lep_MVA[{idx}]>0.92)+(abs(lep_etaSC[{idx}])>1.479)*(lep_MVA[{idx}]>0.88))*(lep_isTriggerSafe_v1[{idx}])*(lep_relIso03EAv2Lep[{idx}]<0.4)*(lep_pt[{idx}]>20.)" el3l_tighttemp = "({loose})*(lep_relIso03EAv2Lep[{idx}]<0.05)".format( loose=el3l_loosetemp, 
idx="{idx}") leadel3l_loose = el3l_loosetemp.format(idx="0") leadel3l_tight = el3l_tighttemp.format(idx="0") trailel3l_loose = el3l_loosetemp.format(idx="1") trailel3l_tight = el3l_tighttemp.format(idx="1") bothel3l_loose = "({})&&({})".format(leadel3l_loose, trailel3l_loose) bothel3l_tight = "({})&&({})".format(leadel3l_tight, trailel3l_tight) # Expressions to divide heavy flavor and !(heavy flavor) leadhf = "((lep_motherIdSS[0]==-1)+(lep_motherIdSS[0]==-2))" leadlf = "((lep_motherIdSS[0]!=-1)*(lep_motherIdSS[0]!=-2))" trailhf = "((lep_motherIdSS[1]==-1)+(lep_motherIdSS[1]==-2))" traillf = "((lep_motherIdSS[1]!=-1)*(lep_motherIdSS[1]!=-2))" # MT expression (as I forgot to add a one lepton MT variable in the WWW baby.) MTexpr = "(TMath::Sqrt(2*met" + jecvar + "_pt*lep_pt[0]*(1.0-TMath::Cos(lep_phi[0]-met" + jecvar + "_phi))))" # One lepton kinematic selection onelep_cuts = "(jets" + jecvar + "_p4[0].pt()>40.)" twolep_cuts = "(lep_pdgId[0]*lep_pdgId[1]>0)*(nj30" + jecvar + ">=2)" # if removing bveto twolepos_cuts = "(lep_pdgId[0]*lep_pdgId[1]<0)" # Electroweak control region selection # TwoMuHLT17/Mll_Z fSumw[1]=155.889, x=90, error=1.35664 # TwoElHLT17/Mll_Z fSumw[1]=650.599, x=90, error=18.1318 # The reason they are not integer is because the prescales are run/lumi dependent and this number is an "effective" prescale value calculated by comparing MC to data in a dilepton z-peak from this trigger hlt_mu17_prescale = 155.889 hlt_el17_prescale = 650.599 onelepewkcr_cuts = "(jets" + jecvar + "_p4[0].pt()>40.)*(met_pt>30.)" onelepewkcr2_cuts = "(jets" + jecvar + "_p4[0].pt()>40.)*(lep_pt[0]>30.)*(met_pt<20.)" onelepewkcr3_cuts = "(jets" + jecvar + "_p4[0].pt()>40.)*(lep_pt[0]>50.)" onelepmr_cuts = "(met_pt<20.)*(" + MTexpr + "<20.)*(jets" + jecvar + "_p4[0].pt()>40.)" oneleptrig_cuts = "(abs(lep_pdgId[0])==11)*(mc_HLT_SingleIsoEl17)+(abs(lep_pdgId[0])==13)*(mc_HLT_SingleIsoMu17)" oneleptrig_wgts = "{$(usefakeweight)?([abs(lep_pdgId[0])==11])*([mc_HLT_SingleIsoEl17])*(" + str( hlt_el17_prescale ) + ")+([abs(lep_pdgId[0])==13])*([mc_HLT_SingleIsoMu17])*(" + str( hlt_mu17_prescale) + "):1.}" onelepnvtx_wgts = "{$(usefakeweight)?1.:([abs(lep_pdgId[0])==11])*([TH1Map:nvtxreweight.root:OneElTightEWKCR3NoNvtxRewgt_nvtx([nVert])])+([abs(lep_pdgId[0])==13])*([TH1Map:nvtxreweight.root:OneMuTightEWKCR3NoNvtxRewgt_nvtx([nVert])])}" # These weights are for closure tests. The closure tests are performed for same-sign channel only. 
weight_elcomb = "([abs(lep_pdgId[0])==11])*([TH2Map:qcd_fakerates.root:qcdel([abs(lep_eta[0])],[lep_pt[0]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[0]-0.03))])])+([abs(lep_pdgId[1])==11])*([TH2Map:qcd_fakerates.root:qcdel([abs(lep_eta[1])],[lep_pt[1]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[1]-0.03))])])" weight_el = "([abs(lep_pdgId[0])==11])*([TH2Map:qcd_fakerates.root:qcdelbcToE([abs(lep_eta[0])],[lep_pt[0]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[0]-0.03))])])+([abs(lep_pdgId[1])==11])*([TH2Map:qcd_fakerates.root:qcdelbcToE([abs(lep_eta[1])],[lep_pt[1]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[1]-0.03))])])" weight_mu = "([abs(lep_pdgId[0])==13])*([TH2Map:qcd_fakerates.root:qcdmu([abs(lep_eta[0])],[lep_pt[0]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[0]-0.03))])])+([abs(lep_pdgId[1])==13])*([TH2Map:qcd_fakerates.root:qcdmu([abs(lep_eta[1])],[lep_pt[1]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[1]-0.03))])])" weight_elEM1D = "([abs(lep_pdgId[0])==11])*([TH1Map:qcd_fakerates.root:qcdelEM1D([lep_pt[0]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[0]-0.03))])])+([abs(lep_pdgId[1])==11])*([TH1Map:qcd_fakerates.root:qcdelEM1D([lep_pt[1]*(1.0+TMath::Max(0.0, lep_relIso03EAv2Lep[1]-0.03))])])" # TQCut objects tqcuts = {} tqcuts["Presel"] = TQCut("Presel", "Presel", PreselCutExpr, PreselWgtExpr) tqcuts["OneLep"] = TQCut( "OneLep", "OneLep", "(nVlep==1)*({})*({})".format(oneleptrig_cuts, onelep_cuts), "({})*({})".format(oneleptrig_wgts, onelepnvtx_wgts)) tqcuts["OneLepNoNvtxRewgt"] = TQCut( "OneLepNoNvtxRewgt", "OneLepNoNvtxRewgt", "(nVlep==1)*({})*({})".format(oneleptrig_cuts, onelep_cuts), "({})".format(oneleptrig_wgts)) tqcuts["TwoLep"] = TQCut("TwoLep", "TwoLep", "(nVlep==2)*({})".format(twolep_cuts), "1") tqcuts["TwoLepOS"] = TQCut("TwoLepOS", "TwoLepOS", "(nVlep==2)*({})".format(twolepos_cuts), "1") tqcuts["OneLepMR"] = TQCut("OneLepMR", "OneLepMR", "(nVlep==1)*({})".format(onelepmr_cuts), "1") tqcuts["OneLepEWKCR"] = TQCut("OneLepEWKCR", "OneLepEWKCR", "(nVlep==1)*({})".format(onelepewkcr_cuts), "1") tqcuts["OneLepEWKCR2"] = TQCut("OneLepEWKCR2", "OneLepEWKCR2", "(nVlep==1)*({})".format(onelepewkcr2_cuts), "1") tqcuts["OneLepEWKCR3"] = TQCut("OneLepEWKCR3", "OneLepEWKCR3", "(nVlep==1)*({})".format(onelepewkcr3_cuts), "1") tqcuts["OneLepEWKCR3NoNvtxRewgt"] = TQCut( "OneLepEWKCR3NoNvtxRewgt", "OneLepEWKCR3NoNvtxRewgt", "(nVlep==1)*({})".format(onelepewkcr3_cuts), "1") tqcuts["OneMu"] = TQCut("OneMu", "OneMu", "(abs(lep_pdgId[0])==13)", "1") tqcuts["OneMuLoose"] = TQCut("OneMuLoose", "OneMuLoose", leadmu_loose, "1") tqcuts["OneMuTight"] = TQCut("OneMuTight", "OneMuTight", leadmu_tight, "1") tqcuts["OneMu3lLoose"] = TQCut("OneMu3lLoose", "OneMu3lLoose", leadmu3l_loose, "1") tqcuts["OneMu3lTight"] = TQCut("OneMu3lTight", "OneMu3lTight", leadmu3l_tight, "1") tqcuts["OneEl"] = TQCut("OneEl", "OneEl", "(abs(lep_pdgId[0])==11)", "1") tqcuts["OneElLoose"] = TQCut("OneElLoose", "OneElLoose", leadel_loose, "1") tqcuts["OneElTight"] = TQCut("OneElTight", "OneElTight", leadel_tight, "1") tqcuts["OneEl3lLoose"] = TQCut("OneEl3lLoose", "OneEl3lLoose", leadel3l_loose, "1") tqcuts["OneEl3lTight"] = TQCut("OneEl3lTight", "OneEl3lTight", leadel3l_tight, "1") tqcuts["OneMuEWKCR"] = TQCut("OneMuEWKCR", "OneMuEWKCR", "(abs(lep_pdgId[0])==13)", "1") tqcuts["OneElEWKCR"] = TQCut("OneElEWKCR", "OneElEWKCR", "(abs(lep_pdgId[0])==11)", "1") tqcuts["OneMuTightEWKCR"] = TQCut("OneMuTightEWKCR", "OneMuTightEWKCR", leadmu_tight, "1") tqcuts["OneElTightEWKCR"] = TQCut("OneElTightEWKCR", "OneElTightEWKCR", leadel_tight, "1") 
tqcuts["OneMu3lTightEWKCR"] = TQCut("OneMu3lTightEWKCR", "OneMu3lTightEWKCR", leadmu3l_tight, "1") tqcuts["OneEl3lTightEWKCR"] = TQCut("OneEl3lTightEWKCR", "OneEl3lTightEWKCR", leadel3l_tight, "1") tqcuts["OneMuEWKCR2"] = TQCut("OneMuEWKCR2", "OneMuEWKCR2", "(abs(lep_pdgId[0])==13)", "1") tqcuts["OneElEWKCR2"] = TQCut("OneElEWKCR2", "OneElEWKCR2", "(abs(lep_pdgId[0])==11)", "1") tqcuts["OneMuTightEWKCR2"] = TQCut("OneMuTightEWKCR2", "OneMuTightEWKCR2", leadmu_tight, "1") tqcuts["OneElTightEWKCR2"] = TQCut("OneElTightEWKCR2", "OneElTightEWKCR2", leadel_tight, "1") tqcuts["OneMu3lTightEWKCR2"] = TQCut("OneMu3lTightEWKCR2", "OneMu3lTightEWKCR2", leadmu3l_tight, "1") tqcuts["OneEl3lTightEWKCR2"] = TQCut("OneEl3lTightEWKCR2", "OneEl3lTightEWKCR2", leadel3l_tight, "1") tqcuts["OneMuEWKCR3"] = TQCut("OneMuEWKCR3", "OneMuEWKCR3", "(abs(lep_pdgId[0])==13)", "1") tqcuts["OneElEWKCR3"] = TQCut("OneElEWKCR3", "OneElEWKCR3", "(abs(lep_pdgId[0])==11)", "1") tqcuts["OneMuTightEWKCR3"] = TQCut("OneMuTightEWKCR3", "OneMuTightEWKCR3", leadmu_tight, "1") tqcuts["OneElTightEWKCR3"] = TQCut("OneElTightEWKCR3", "OneElTightEWKCR3", leadel_tight, "1") tqcuts["OneMu3lTightEWKCR3"] = TQCut("OneMu3lTightEWKCR3", "OneMu3lTightEWKCR3", leadmu3l_tight, "1") tqcuts["OneEl3lTightEWKCR3"] = TQCut("OneEl3lTightEWKCR3", "OneEl3lTightEWKCR3", leadel3l_tight, "1") tqcuts["OneMuEWKCR3NoNvtxRewgt"] = TQCut("OneMuEWKCR3NoNvtxRewgt", "OneMuEWKCR3NoNvtxRewgt", "(abs(lep_pdgId[0])==13)", "1") tqcuts["OneElEWKCR3NoNvtxRewgt"] = TQCut("OneElEWKCR3NoNvtxRewgt", "OneElEWKCR3NoNvtxRewgt", "(abs(lep_pdgId[0])==11)", "1") tqcuts["OneMuTightEWKCR3NoNvtxRewgt"] = TQCut( "OneMuTightEWKCR3NoNvtxRewgt", "OneMuTightEWKCR3NoNvtxRewgt", leadmu_tight, "1") tqcuts["OneElTightEWKCR3NoNvtxRewgt"] = TQCut( "OneElTightEWKCR3NoNvtxRewgt", "OneElTightEWKCR3NoNvtxRewgt", leadel_tight, "1") tqcuts["OneMu3lTightEWKCR3NoNvtxRewgt"] = TQCut( "OneMu3lTightEWKCR3NoNvtxRewgt", "OneMu3lTightEWKCR3NoNvtxRewgt", leadmu3l_tight, "1") tqcuts["OneEl3lTightEWKCR3NoNvtxRewgt"] = TQCut( "OneEl3lTightEWKCR3NoNvtxRewgt", "OneEl3lTightEWKCR3NoNvtxRewgt", leadel3l_tight, "1") tqcuts["TwoMu"] = TQCut( "TwoMu", "TwoMu", "([ClosureEvtType]==0)*[(abs(lep_pdgId[0]*lep_pdgId[1])==143)]*[(abs(lep_pdgId[0])==11)*(lep_pass_VVV_cutbased_tight[0])+(abs(lep_pdgId[1])==11)*(lep_pass_VVV_cutbased_tight[1])]", "1") # one any muon and one real tight electron with two total leptons tqcuts["TwoMuLoose"] = TQCut( "TwoMuLoose", "TwoMuLoose", "(abs(lep_pdgId[0])==13)*({})+(abs(lep_pdgId[1])==13)*({})".format( leadmu_loose, trailmu_loose), "1") tqcuts["TwoMuTight"] = TQCut( "TwoMuTight", "TwoMuTight", "(abs(lep_pdgId[0])==13)*({})+(abs(lep_pdgId[1])==13)*({})".format( leadmu_tight, trailmu_tight), "1") tqcuts["TwoMuLoosePredict"] = TQCut( "TwoMuLoosePredict", "TwoMuLoosePredict", "(abs(lep_pdgId[0])==13)*({})+(abs(lep_pdgId[1])==13)*({})".format( leadmu_tight, trailmu_tight), "1") tqcuts["TwoMuTightPredict"] = TQCut( "TwoMuTightPredict", "TwoMuTightPredict", "(abs(lep_pdgId[0])==13)*({})*(!({}))+(abs(lep_pdgId[1])==13)*({})*(!({}))" .format(leadmu_loose, leadmu_tight, trailmu_loose, trailmu_tight), weight_mu) tqcuts["TwoMuLoosePredictBVeto"] = TQCut( "TwoMuLoosePredictBVeto", "TwoMuLoosePredictBVeto", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==13)*({})+(abs(lep_pdgId[1])==13)*({}))". 
format(leadmu_tight, trailmu_tight), "1") tqcuts["TwoMuTightPredictBVeto"] = TQCut( "TwoMuTightPredictBVeto", "TwoMuTightPredictBVeto", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==13)*({})*(!({}))+(abs(lep_pdgId[1])==13)*({})*(!({})))" .format(leadmu_loose, leadmu_tight, trailmu_loose, trailmu_tight), weight_mu) tqcuts["TwoEl"] = TQCut( "TwoEl", "TwoEl", "([ClosureEvtType]==1)*[(abs(lep_pdgId[0]*lep_pdgId[1])==143)]*[(abs(lep_pdgId[0])==13)*(lep_pass_VVV_cutbased_tight[0])+(abs(lep_pdgId[1])==13)*(lep_pass_VVV_cutbased_tight[1])]", "1") # one any electron and one real tight muon with two total leptons tqcuts["TwoElLoose"] = TQCut( "TwoElLoose", "TwoElLoose", "(abs(lep_pdgId[0])==11)*({})+(abs(lep_pdgId[1])==11)*({})".format( leadel_loose, trailel_loose), "1") tqcuts["TwoElTight"] = TQCut( "TwoElTight", "TwoElTight", "(abs(lep_pdgId[0])==11)*({})+(abs(lep_pdgId[1])==11)*({})".format( leadel_tight, trailel_tight), "1") tqcuts["TwoElLoosePredict"] = TQCut( "TwoElLoosePredict", "TwoElLoosePredict", "(abs(lep_pdgId[0])==11)*({})+(abs(lep_pdgId[1])==11)*({})".format( leadel_tight, trailel_tight), "1") tqcuts["TwoElTightPredict"] = TQCut( "TwoElTightPredict", "TwoElTightPredict", "(abs(lep_pdgId[0])==11)*({})*(!({}))+(abs(lep_pdgId[1])==11)*({})*(!({}))" .format(leadel_loose, leadel_tight, trailel_loose, trailel_tight), weight_el) tqcuts["TwoElLoosePredictHF"] = TQCut( "TwoElLoosePredictHF", "TwoElLoosePredictHF", "(abs(lep_pdgId[0])==11)*({})*({})+(abs(lep_pdgId[1])==11)*({})*({})". format(leadel_tight, leadhf, trailel_tight, trailhf), "1") tqcuts["TwoElTightPredictHF"] = TQCut( "TwoElTightPredictHF", "TwoElTightPredictHF", "(abs(lep_pdgId[0])==11)*({})*(!({}))*({})+(abs(lep_pdgId[1])==11)*({})*(!({}))*({})" .format(leadel_loose, leadel_tight, leadhf, trailel_loose, trailel_tight, trailhf), weight_el) tqcuts["TwoElLoosePredictEM1DLF"] = TQCut( "TwoElLoosePredictEM1DLF", "TwoElLoosePredictEM1DLF", "(abs(lep_pdgId[0])==11)*({})*({})+(abs(lep_pdgId[1])==11)*({})*({})". format(leadel_tight, leadlf, trailel_tight, traillf), "1") tqcuts["TwoElTightPredictEM1DLF"] = TQCut( "TwoElTightPredictEM1DLF", "TwoElTightPredictEM1DLF", "(abs(lep_pdgId[0])==11)*({})*(!({}))*({})+(abs(lep_pdgId[1])==11)*({})*(!({}))*({})" .format(leadel_loose, leadel_tight, leadlf, trailel_loose, trailel_tight, traillf), weight_elEM1D) tqcuts["TwoElLoosePredictComb"] = TQCut( "TwoElLoosePredictComb", "TwoElLoosePredictComb", "(abs(lep_pdgId[0])==11)*({})+(abs(lep_pdgId[1])==11)*({})".format( leadel_tight, trailel_tight), "1") tqcuts["TwoElTightPredictComb"] = TQCut( "TwoElTightPredictComb", "TwoElTightPredictComb", "(abs(lep_pdgId[0])==11)*({})*(!({}))+(abs(lep_pdgId[1])==11)*({})*(!({}))" .format(leadel_loose, leadel_tight, trailel_loose, trailel_tight), weight_elcomb) tqcuts["TwoElLoosePredictBVeto"] = TQCut( "TwoElLoosePredictBVeto", "TwoElLoosePredictBVeto", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})+(abs(lep_pdgId[1])==11)*({}))". 
format(leadel_tight, trailel_tight), "1") tqcuts["TwoElTightPredictBVeto"] = TQCut( "TwoElTightPredictBVeto", "TwoElTightPredictBVeto", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})*(!({}))+(abs(lep_pdgId[1])==11)*({})*(!({})))" .format(leadel_loose, leadel_tight, trailel_loose, trailel_tight), weight_el) tqcuts["TwoElLoosePredictBVetoHF"] = TQCut( "TwoElLoosePredictBVetoHF", "TwoElLoosePredictBVetoHF", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})*({})+(abs(lep_pdgId[1])==11)*({})*({}))" .format(leadel_tight, leadhf, trailel_tight, trailhf), "1") tqcuts["TwoElTightPredictBVetoHF"] = TQCut( "TwoElTightPredictBVetoHF", "TwoElTightPredictBVetoHF", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})*(!({}))*({})+(abs(lep_pdgId[1])==11)*({})*(!({}))*({}))" .format(leadel_loose, leadel_tight, leadhf, trailel_loose, trailel_tight, trailhf), weight_el) tqcuts["TwoElLoosePredictBVetoEM1DLF"] = TQCut( "TwoElLoosePredictBVetoEM1DLF", "TwoElLoosePredictBVetoEM1DLF", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})*({})+(abs(lep_pdgId[1])==11)*({})*({}))" .format(leadel_tight, leadlf, trailel_tight, traillf), "1") tqcuts["TwoElTightPredictBVetoEM1DLF"] = TQCut( "TwoElTightPredictBVetoEM1DLF", "TwoElTightPredictBVetoEM1DLF", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})*(!({}))*({})+(abs(lep_pdgId[1])==11)*({})*(!({}))*({}))" .format(leadel_loose, leadel_tight, leadlf, trailel_loose, trailel_tight, traillf), weight_elEM1D) tqcuts["TwoElLoosePredictBVetoComb"] = TQCut( "TwoElLoosePredictBVetoComb", "TwoElLoosePredictBVetoComb", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})+(abs(lep_pdgId[1])==11)*({}))". format(leadel_tight, trailel_tight), "1") tqcuts["TwoElTightPredictBVetoComb"] = TQCut( "TwoElTightPredictBVetoComb", "TwoElTightPredictBVetoComb", "(nb" + jecvar + "==0)*((abs(lep_pdgId[0])==11)*({})*(!({}))+(abs(lep_pdgId[1])==11)*({})*(!({})))" .format(leadel_loose, leadel_tight, trailel_loose, trailel_tight), weight_elcomb) tqcuts["TwoMuHLT8"] = TQCut( "TwoMuHLT8", "TwoMuHLT8", "(mc_HLT_SingleIsoMu8)*(MllSS>60.)*(MllSS<120.)", "1") tqcuts["TwoMuHLT17"] = TQCut( "TwoMuHLT17", "TwoMuHLT17", "(mc_HLT_SingleIsoMu17)*(MllSS>60.)*(MllSS<120.)", "1") tqcuts["TwoElHLT8"] = TQCut( "TwoElHLT8", "TwoElHLT8", "(mc_HLT_SingleIsoEl8)*(MllSS>60.)*(MllSS<120.)", "1") tqcuts["TwoElHLT17"] = TQCut( "TwoElHLT17", "TwoElHLT17", "(mc_HLT_SingleIsoEl17)*(MllSS>60.)*(MllSS<120.)", "1") # Linking TQCut objects tqcuts["Presel"].addCut(tqcuts["OneLep"]) tqcuts["Presel"].addCut(tqcuts["OneLepNoNvtxRewgt"]) tqcuts["Presel"].addCut(tqcuts["TwoLep"]) tqcuts["Presel"].addCut(tqcuts["TwoLepOS"]) tqcuts["OneLep"].addCut(tqcuts["OneLepMR"]) tqcuts["OneLep"].addCut(tqcuts["OneLepEWKCR"]) tqcuts["OneLep"].addCut(tqcuts["OneLepEWKCR2"]) tqcuts["OneLep"].addCut(tqcuts["OneLepEWKCR3"]) tqcuts["OneLepNoNvtxRewgt"].addCut(tqcuts["OneLepEWKCR3NoNvtxRewgt"]) tqcuts["OneLepMR"].addCut(tqcuts["OneMu"]) tqcuts["OneMu"].addCut(tqcuts["OneMuLoose"]) tqcuts["OneMuLoose"].addCut(tqcuts["OneMuTight"]) tqcuts["OneMu"].addCut(tqcuts["OneMu3lLoose"]) tqcuts["OneMu3lLoose"].addCut(tqcuts["OneMu3lTight"]) tqcuts["OneLepMR"].addCut(tqcuts["OneEl"]) tqcuts["OneEl"].addCut(tqcuts["OneElLoose"]) tqcuts["OneElLoose"].addCut(tqcuts["OneElTight"]) tqcuts["OneEl"].addCut(tqcuts["OneEl3lLoose"]) tqcuts["OneEl3lLoose"].addCut(tqcuts["OneEl3lTight"]) tqcuts["OneLepEWKCR"].addCut(tqcuts["OneMuEWKCR"]) tqcuts["OneLepEWKCR"].addCut(tqcuts["OneElEWKCR"]) tqcuts["OneMuEWKCR"].addCut(tqcuts["OneMuTightEWKCR"]) 
tqcuts["OneElEWKCR"].addCut(tqcuts["OneElTightEWKCR"]) tqcuts["OneMuEWKCR"].addCut(tqcuts["OneMu3lTightEWKCR"]) tqcuts["OneElEWKCR"].addCut(tqcuts["OneEl3lTightEWKCR"]) tqcuts["OneLepEWKCR2"].addCut(tqcuts["OneMuEWKCR2"]) tqcuts["OneLepEWKCR2"].addCut(tqcuts["OneElEWKCR2"]) tqcuts["OneMuEWKCR2"].addCut(tqcuts["OneMuTightEWKCR2"]) tqcuts["OneElEWKCR2"].addCut(tqcuts["OneElTightEWKCR2"]) tqcuts["OneMuEWKCR2"].addCut(tqcuts["OneMu3lTightEWKCR2"]) tqcuts["OneElEWKCR2"].addCut(tqcuts["OneEl3lTightEWKCR2"]) tqcuts["OneLepEWKCR3"].addCut(tqcuts["OneMuEWKCR3"]) tqcuts["OneLepEWKCR3"].addCut(tqcuts["OneElEWKCR3"]) tqcuts["OneMuEWKCR3"].addCut(tqcuts["OneMuTightEWKCR3"]) tqcuts["OneElEWKCR3"].addCut(tqcuts["OneElTightEWKCR3"]) tqcuts["OneMuEWKCR3"].addCut(tqcuts["OneMu3lTightEWKCR3"]) tqcuts["OneElEWKCR3"].addCut(tqcuts["OneEl3lTightEWKCR3"]) tqcuts["OneLepEWKCR3NoNvtxRewgt"].addCut(tqcuts["OneMuEWKCR3NoNvtxRewgt"]) tqcuts["OneLepEWKCR3NoNvtxRewgt"].addCut(tqcuts["OneElEWKCR3NoNvtxRewgt"]) tqcuts["OneMuEWKCR3NoNvtxRewgt"].addCut( tqcuts["OneMuTightEWKCR3NoNvtxRewgt"]) tqcuts["OneElEWKCR3NoNvtxRewgt"].addCut( tqcuts["OneElTightEWKCR3NoNvtxRewgt"]) tqcuts["OneMuEWKCR3NoNvtxRewgt"].addCut( tqcuts["OneMu3lTightEWKCR3NoNvtxRewgt"]) tqcuts["OneElEWKCR3NoNvtxRewgt"].addCut( tqcuts["OneEl3lTightEWKCR3NoNvtxRewgt"]) tqcuts["TwoLep"].addCut(tqcuts["TwoMu"]) tqcuts["TwoMu"].addCut(tqcuts["TwoMuLoosePredict"]) tqcuts["TwoMu"].addCut(tqcuts["TwoMuTightPredict"]) tqcuts["TwoMu"].addCut(tqcuts["TwoMuLoosePredictBVeto"]) tqcuts["TwoMu"].addCut(tqcuts["TwoMuTightPredictBVeto"]) tqcuts["TwoMu"].addCut(tqcuts["TwoMuLoose"]) tqcuts["TwoMuLoose"].addCut(tqcuts["TwoMuTight"]) tqcuts["TwoLep"].addCut(tqcuts["TwoEl"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredict"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredict"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredictComb"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredictComb"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredictHF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredictHF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredictEM1DLF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredictEM1DLF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredictBVeto"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredictBVeto"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredictBVetoComb"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredictBVetoComb"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredictBVetoHF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredictBVetoHF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoosePredictBVetoEM1DLF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElTightPredictBVetoEM1DLF"]) tqcuts["TwoEl"].addCut(tqcuts["TwoElLoose"]) tqcuts["TwoElLoose"].addCut(tqcuts["TwoElTight"]) tqcuts["TwoLepOS"].addCut(tqcuts["TwoMuHLT8"]) tqcuts["TwoLepOS"].addCut(tqcuts["TwoMuHLT17"]) tqcuts["TwoLepOS"].addCut(tqcuts["TwoElHLT8"]) tqcuts["TwoLepOS"].addCut(tqcuts["TwoElHLT17"]) # Grand cut cuts = tqcuts["Presel"] # # # Define histograms # # # N.B. Any 2D histogram must have "_vs_" in the name. This is an important conventino for the makeplot.py script to be able to distinguish the 1D vs. 2D histogram. 
filename = ".histo.mr.{}.cfg".format(index) f = open(filename, "w") f.write(""" TH2F('lep_pt_vs_eta' , '' , {{0, 0.9, 1.6, 1.9, 2.4}}, {{20, 30, 40, 50, 60, 70, 150, 2000}} ) << (abs(lep_eta[0]) : '|\#eta|', lep_pt[0] : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: lep_pt_vs_eta; TH2F('lep_ptcorr_vs_eta' , '' , {{0, 0.9, 1.6, 1.9, 2.4}}, {{0., 5., 10., 15., 20., 25., 30., 35., 40., 45., 60., 80., 120.}} ) << (abs(lep_eta[0]) : '|\#eta|', TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.) : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: lep_ptcorr_vs_eta; TH2F('lep_ptcorrcoarse_vs_eta' , '' , {{0, 0.9, 1.6, 1.9, 2.4}}, {{0., 10., 20., 25., 30., 40., 120.}} ) << (abs(lep_eta[0]) : '|\#eta|', TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.) : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: lep_ptcorrcoarse_vs_eta; TH2F('lep_ptcorrcoarse_vs_etacoarse' , '' , {{0, 1.6, 2.4}}, {{0., 10., 20., 25., 30., 40., 120.}} ) << (abs(lep_eta[0]) : '|\#eta|', TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.) : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: lep_ptcorrcoarse_vs_etacoarse; TH2F('el3l_ptcorrcoarse_vs_etacoarse' , '' , {{0, 1.6, 2.4}}, {{0., 10., 20., 25., 30., 40., 120.}} ) << (abs(lep_eta[0]) : '|\#eta|', TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.05)),119.) : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: el3l_ptcorrcoarse_vs_etacoarse; TH2F('mu3l_ptcorrcoarse_vs_etacoarse' , '' , {{0, 1.6, 2.4}}, {{0., 10., 20., 25., 30., 40., 120.}} ) << (abs(lep_eta[0]) : '|\#eta|', TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.07)),119.) : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: mu3l_ptcorrcoarse_vs_etacoarse; TH1F('lep_pt' , '' , 180 , 0. , 250 ) << (lep_pt[0] : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: lep_pt; TH1F('lep_pt' , '' , 180 , 0. , 250 ) << (lep_pt[0] : '\#it{{p}}_{{T}} [GeV]'); @OneLep/*: lep_pt; TH1F('lep_pdgId' , '' , 40 , -20. , 20 ) << (lep_pdgId[0] : 'Lepton PDG ID'); @OneLep/*: lep_pdgId; TH1F('lep_ptcorr' , '' , 180 , 0. , 250 ) << (lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)) : '\#it{{p}}_{{T, cone-corr}} [GeV]'); @OneLep/*: lep_ptcorr; TH1F('lep_ptcorrvarbin' , '' , {{0., 5., 10., 15., 20., 25., 30., 35., 40., 45., 60., 80., 120.}}) << (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.) : '\#it{{p}}_{{T, cone-corr}} [GeV]'); @OneLep/*: lep_ptcorrvarbin; TH1F('lep_ptcorrvarbincoarse' , '' , {{0., 10., 20., 25., 30., 40., 120.}}) << (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.) : '\#it{{p}}_{{T, cone-corr}} [GeV]'); @OneLep/*: lep_ptcorrvarbincoarse; TH1F('el3l_ptcorrvarbincoarse' , '' , {{0., 10., 20., 25., 30., 40., 120.}}) << (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.05)),119.) : '\#it{{p}}_{{T, cone-corr}} [GeV]'); @OneLep/*: el3l_ptcorrvarbincoarse; TH1F('mu3l_ptcorrvarbincoarse' , '' , {{0., 10., 20., 25., 30., 40., 120.}}) << (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.07)),119.) 
: '\#it{{p}}_{{T, cone-corr}} [GeV]'); @OneLep/*: mu3l_ptcorrvarbincoarse; TH1F('lep_yield' , '' , 1, 0, 1) << (0 : 'yield'); @OneLep/*: lep_yield; TH1F('lep_eta' , '' , 180 , -2.5 , 2.5 ) << (lep_eta[0] : '\#eta'); @OneLep/*: lep_eta; TH1F('lep_etavarbin' , '' , {{-2.5, -2.1, -1.6, -1.0, 0.0, 1.0, 1.6, 2.1, 2.5}} ) << (lep_eta[0] : '\#eta'); @OneLep/*: lep_etavarbin; TH1F('lep_relIso03EAv2Lep' , '' , 180 , 0.0 , 0.6 ) << (lep_relIso03EAv2Lep[0] : 'I_{{R=0.3,EA,Lep}}'); @OneLep/*: lep_relIso03EAv2Lep; TH1F('mu_ptcorrvarbin' , '' , {{0., 5., 10., 15., 20., 25., 30., 35., 40., 45., 60., 80., 120.}}) << ((TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==13)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==13) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoMu/*: mu_ptcorrvarbin; TH1F('mu_ptcorrvarbincoarse' , '' , {{0., 10., 20., 25., 30., 40., 120.}}) << ((TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==13)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==13) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoMu/*: mu_ptcorrvarbincoarse; TH1F('mu_yield' , '' , 1, 0, 1) << (0 : 'yield'); @TwoMu/*: mu_yield; TH2F('mu_ptcorr_vs_eta' , '' , {{0, 0.9, 1.6, 1.9, 2.4}}, {{0., 5., 10., 15., 20., 25., 30., 35., 40., 45., 60., 80., 120.}} ) << ((abs(lep_eta[0]))*(abs(lep_pdgId[0])==13)+(abs(lep_eta[1]))*(abs(lep_pdgId[1])==13) : '|\#eta|', (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==13)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==13) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoMu/*: mu_ptcorr_vs_eta; TH2F('mu_ptcorrcoarse_vs_eta' , '' , {{0, 0.9, 1.6, 1.9, 2.4}}, {{0., 10., 20., 25., 30., 40., 60., 120.}} ) << ((abs(lep_eta[0]))*(abs(lep_pdgId[0])==13)+(abs(lep_eta[1]))*(abs(lep_pdgId[1])==13) : '|\#eta|', (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==13)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==13) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoMu/*: mu_ptcorrcoarse_vs_eta; TH2F('mu_ptcorrcoarse_vs_etacoarse' , '' , {{0, 1.6, 2.4}}, {{0., 10., 20., 25., 30., 40., 120.}} ) << ((abs(lep_eta[0]))*(abs(lep_pdgId[0])==13)+(abs(lep_eta[1]))*(abs(lep_pdgId[1])==13) : '|\#eta|', (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==13)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==13) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoMu/*: mu_ptcorrcoarse_vs_etacoarse; TH1F('mu_pt' , '' , 180 , 0., 250) << ((lep_pt[0])*(abs(lep_pdgId[0])==13)+(lep_pt[1])*(abs(lep_pdgId[1])==13) : '\#it{{p}}_{{T, \#mu}} [GeV]'); @TwoMu/*: mu_pt; TH1F('mu_eta' , '' , 180 , -2.5, 2.5) << ((lep_eta[0])*(abs(lep_pdgId[0])==13)+(lep_eta[1])*(abs(lep_pdgId[1])==13) : '\#eta_{{\#mu}}'); @TwoMu/*: mu_eta; TH1F('mu_etavarbin' , '' , {{-2.5, -2.1, -1.6, -1.0, 0.0, 1.0, 1.6, 2.1, 2.5}} ) << (lep_eta[0] : '\#eta'); @TwoMu/*: mu_etavarbin; TH1F('mu_relIso03EAv2Lep' , '' , 180 , 0., 0.6) << ((lep_relIso03EAv2Lep[0])*(abs(lep_pdgId[0])==13)+(lep_relIso03EAv2Lep[1])*(abs(lep_pdgId[1])==13) : 'I_{{R=0.3,EA,Lep,\#mu}}'); @TwoMu/*: mu_relIso03EAv2Lep; TH2F('el_ptcorr_vs_eta' , '' , {{0, 0.9, 1.6, 1.9, 2.4}}, {{0., 5., 10., 15., 20., 25., 30., 35., 40., 45., 
60., 80., 120.}} ) << ((abs(lep_eta[0]))*(abs(lep_pdgId[0])==11)+(abs(lep_eta[1]))*(abs(lep_pdgId[1])==11) : '|\#eta|', (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==11)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==11) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoEl/*: el_ptcorr_vs_eta; TH2F('el_ptcorrcoarse_vs_eta' , '' , {{0, 0.9, 1.6, 1.9, 2.4}}, {{0., 10., 20., 25., 30., 40., 120.}} ) << ((abs(lep_eta[0]))*(abs(lep_pdgId[0])==11)+(abs(lep_eta[1]))*(abs(lep_pdgId[1])==11) : '|\#eta|', (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==11)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==11) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoEl/*: el_ptcorrcoarse_vs_eta; TH2F('el_ptcorrcoarse_vs_etacoarse' , '' , {{0, 1.6, 2.4}}, {{0., 10., 20., 25., 30., 40., 120.}} ) << ((abs(lep_eta[0]))*(abs(lep_pdgId[0])==11)+(abs(lep_eta[1]))*(abs(lep_pdgId[1])==11) : '|\#eta|', (TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==11)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==11) : '\#it{{p}}_{{T, cone-corr, mu}} [GeV]'); @TwoEl/*: el_ptcorrcoarse_vs_etacoarse; TH1F('el_ptcorrvarbin' , '' , {{0., 5., 10., 15., 20., 25., 30., 35., 40., 45., 60., 80., 120.}}) << ((TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==11)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==11) : '\#it{{p}}_{{T, cone-corr, el}} [GeV]'); @TwoEl/*: el_ptcorrvarbin; TH1F('el_ptcorrvarbincoarse' , '' , {{0., 10., 20., 25., 30., 40., 120.}}) << ((TMath::Min(lep_pt[0]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[0]-0.03)),119.))*(abs(lep_pdgId[0])==11)+(TMath::Min(lep_pt[1]*(1.+TMath::Max(0.,lep_relIso03EAv2Lep[1]-0.03)),119.))*(abs(lep_pdgId[1])==11) : '\#it{{p}}_{{T, cone-corr, el}} [GeV]'); @TwoEl/*: el_ptcorrvarbincoarse; TH1F('el_yield' , '' , 1, 0, 1) << (0 : 'yield'); @TwoEl/*: el_yield; TH1F('el_pt' , '' , 180 , 0., 250) << ((lep_pt[0])*(abs(lep_pdgId[0])==11)+(lep_pt[1])*(abs(lep_pdgId[1])==11) : '\#it{{p}}_{{T, el}} [GeV]'); @TwoEl/*: el_pt; TH1F('el_eta' , '' , 180 , -2.5, 2.5) << ((lep_eta[0])*(abs(lep_pdgId[0])==11)+(lep_eta[1])*(abs(lep_pdgId[1])==11) : '\#eta_{{el}}'); @TwoEl/*: el_eta; TH1F('el_etavarbin' , '' , {{-2.5, -2.1, -1.6, -1.0, 0.0, 1.0, 1.6, 2.1, 2.5}} ) << (lep_eta[0] : '\#eta'); @TwoEl/*: el_etavarbin; TH1F('el_relIso03EAv2Lep' , '' , 180 , 0., 0.6) << ((lep_relIso03EAv2Lep[0])*(abs(lep_pdgId[0])==11)+(lep_relIso03EAv2Lep[1])*(abs(lep_pdgId[1])==11) : 'I_{{R=0.3,EA,Lep,el}}'); @TwoEl/*: el_relIso03EAv2Lep; TH1F('Mjj_el' , '' , 180 , 0., 180. ) << ({Mjj} : '\#it{{m}}_{{jj}} [GeV]'); @TwoEl/*: Mjj_el; TH1F('Mjj_mu' , '' , 180 , 0., 180. ) << ({Mjj} : '\#it{{m}}_{{jj}} [GeV]'); @TwoMu/*: Mjj_mu; TH1F('Mll_el' , '' , 180 , 0., 180. ) << (MllSS : '\#it{{m}}_{{ll}} [GeV]'); @TwoEl/*: Mll_el; TH1F('Mll_mu' , '' , 180 , 0., 180. ) << (MllSS : '\#it{{m}}_{{ll}} [GeV]'); @TwoMu/*: Mll_mu; TH1F('DPhill_el' , '' , 180 , 0., 3.1416 ) << (TMath::Abs(TVector2::Phi_mpi_pi(lep_phi[0]-lep_phi[1])) : '\#it{{m}}_{{ll}} [GeV]'); @TwoEl/*: DPhill_el; TH1F('DPhill_mu' , '' , 180 , 0., 3.1416 ) << (TMath::Abs(TVector2::Phi_mpi_pi(lep_phi[0]-lep_phi[1])) : '\#it{{m}}_{{ll}} [GeV]'); @TwoMu/*: DPhill_mu; TH1F('MET_el' , '' , 180 , 0., 180. 
) << ({MET} : 'MET [GeV]'); @TwoEl/*: MET_el; TH1F('MET_mu' , '' , 180 , 0., 180. ) << ({MET} : 'MET [GeV]'); @TwoMu/*: MET_mu; TH1F('MTmax_el' , '' , 180 , 0., 180. ) << ({MTmax} : '\#it{{m}}_{{T,max}} [GeV]'); @TwoEl/*: MTmax_el; TH1F('MTmax_mu' , '' , 180 , 0., 180. ) << ({MTmax} : '\#it{{m}}_{{T,max}} [GeV]'); @TwoMu/*: MTmax_mu; TH1F('nb_el' , '' , 5, 0., 5.) << ({nb} : 'N_{{b-jets}}'); @TwoEl/*: nb_el; TH1F('nb_mu' , '' , 5, 0., 5.) << ({nb} : 'N_{{b-jets}}'); @TwoMu/*: nb_mu; TH1F('nj30_el' , '' , 5, 0., 5.) << ({nj30} : 'N_{{jets,30,cent}}'); @TwoEl/*: nj30_el; TH1F('nj30_mu' , '' , 5, 0., 5.) << ({nj30} : 'N_{{jets,30,cent}}'); @TwoMu/*: nj30_mu; TH1F('nj_el' , '' , 5, 0., 5.) << ({nj} : 'N_{{jets,all}}'); @TwoEl/*: nj_el; TH1F('nj_mu' , '' , 5, 0., 5.) << ({nj} : 'N_{{jets,all}}'); @TwoMu/*: nj_mu; TH1F('Mll_Z' , '' , 180 , 60., 120. ) << (MllSS : '\#it{{m}}_{{ll}} [GeV]'); @TwoLepOS/*: Mll_Z; TH1F('MTOneLep' , '' , 180 , 0., 180. ) << ({MT} : '\#it{{m}}_{{T}} [GeV]'); @*/*: MTOneLep; TH1F('MTOneLepFixed' , '' , 20 , 0., 200. ) << ({MT} : '\#it{{m}}_{{T}} [GeV]'); @*/*: MTOneLepFixed; TH1F('nvtx' , '' , 60 , 0., 60. ) << (nVert : 'N_{{vtx}}'); @*/*: nvtx; """.format(Mjj="Mjj" + jecvar, MET="met" + jecvar + "_pt", nb="nb" + jecvar, nj30="nj30" + jecvar, nj="nj" + jecvar, MT=MTexpr, MTmax="MTmax" + jecvar)) f.close() # # # Book Analysis Jobs (Histogramming, Cutflow, Event lists) # # histojob = TQHistoMakerAnalysisJob() histojob.importJobsFromTextFiles(filename, cuts, "*", True if index < 0 else False) # Analysis jobs cutflowjob = TQCutflowAnalysisJob("cutflow") cuts.addAnalysisJob(cutflowjob, "*") # Eventlist jobs (use this if we want to print out some event information in a text format e.g. run, lumi, evt or other variables.) #eventlistjob = TQEventlistAnalysisJob("eventlist") #eventlistjob.importJobsFromTextFiles("eventlist.cfg", cuts, "*", True) # Print cuts and numebr of booked analysis jobs for debugging purpose if index < 0: samples.printContents("t[*status]dr") cuts.printCut("trd") return # # # Add custom tqobservable that can do more than just string based draw statements # # from QFramework import TQWWWMTOneLep, TQWWWClosureEvtType customobservables = {} customobservables["MTOneLep"] = TQWWWMTOneLep("MTOneLep") customobservables["ClosureEvtType"] = TQWWWClosureEvtType("ClosureEvtType") TQObservable.addObservable(customobservables["MTOneLep"], "MTOneLep") TQObservable.addObservable(customobservables["ClosureEvtType"], "ClosureEvtType") # # # Loop over the samples # # # setup a visitor to actually loop over ROOT files vis = TQAnalysisSampleVisitor(cuts, True) #vis.setMaxEvents(30000) # to debug by restricting the looping to 30k max events if index >= 0: # Get all sample lists sample_names, sample_full_names = getSampleLists(samples) # Select the job based on the index sample_name = sample_names[index] sample_full_name = sample_full_names[sample_name] # Run the job! samples.visitSampleFolders(vis, "/*/{}".format(sample_full_name)) # Write the output histograms and cutflow cut values and etc. samples.writeToFile(".output_{}.root".format(sample_name), True) else: # Run the job! samples.visitSampleFolders(vis) # Write the output histograms and cutflow cut values and etc. samples.writeToFile("output.root", True)
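# Note (illustrative): once registered via TQObservable.addObservable, the
# custom observables are referenced from cut and histogram expressions by name
# in square brackets, e.g. the "TwoMu"/"TwoEl" cuts above use
# [ClosureEvtType], and [MTOneLep] could likewise be used inside a histogram
# definition in place of the hand-written MT expression.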
print "" print "NOTE : Running with default mode of MODE=0!" print "NOTE : Running with default mode of MODE=0!" print "NOTE : Running with default mode of MODE=0!" print "NOTE : Running with default mode of MODE=0!" mode = 0 donotrun = len(sys.argv) >= 3 # Delete previous remnants os.system("rm -f .output_*.root") os.system("rm -f .histo.mr.*.cfg") import multiprocessing samples = TQSampleFolder("samples") connectNtuples(samples, samplescfgpath, nfspath, ">4", ">5") # Get all sample lists sample_names, sample_full_names = getSampleLists(samples) njobs = len(sample_names) if donotrun: main(-1, mode, donotrun) sys.exit() jobs = [] for i in range(njobs): p = multiprocessing.Process(target=main, args=( i, mode, donotrun,
#!/bin/env python

import os
import sys
import ROOT
from QFramework import TQSampleFolder, TQEventlistPrinter, TQTaggable
from rooutil import plottery_wrapper as p

ROOT.gROOT.SetBatch(True)

path = "eventlists/"
filename = sys.argv[1]

samples = TQSampleFolder.loadSampleFolder("{}:samples".format(filename))

printer = TQEventlistPrinter(samples)
printer.addCut("SRSSeeFull")
printer.addCut("SRSSemFull")
printer.addCut("SRSSmmFull")
printer.addCut("SideSSeeFull")
printer.addCut("SideSSemFull")
printer.addCut("SideSSmmFull")
printer.addCut("SR0SFOSFull")
printer.addCut("SR1SFOSFull")
printer.addCut("SR2SFOSFull")
printer.addCut("WZCRSSeeFull")
printer.addCut("WZCRSSemFull")
printer.addCut("WZCRSSmmFull")
printer.addCut("WZCR1SFOSFull")
printer.addCut("WZCR2SFOSFull")
#printer.addProcess("/sig/whwww")
printer.addProcess("/typebkg/lostlep/WZ")
def main(transformation, p, batch_index):
    ott_parser = OTTParser()
    transformation = ott_parser.parse_file(transformation)
    transformation.set_root(transformation)
    batch_index = str(batch_index) or "all"
    transformation.compile({"-batch-index": batch_index})
    if p:
        print transformation

    for input in transformation.get("input"):
        sf = False
        print "Open: %s" % input.attributes['path']
        if "sample-folder" in input.attributes:
            input_file = TQSampleFolder.loadSampleFolder(
                "%s:%s" % (input.attributes['path'],
                           input.attributes['sample-folder']))
            sf = True
        else:
            input_file = ROOT.TFile.Open(input.attributes['path'])

        for output in transformation.get("output"):
            output_file = ROOT.TFile.Open(output.attributes['path'], "RECREATE")

            for TH1F in output.get("TH1F"):
                output_file.cd()
                min = float(TH1F.get("min")[0].content)
                max = float(TH1F.get("max")[0].content)
                destination = TH1F.get("destination")[0].content
                bins = TH1F.get("bin")
                token = destination.rsplit("/", 1)
                if len(token) == 1:
                    token = "", token[0]
                if not output_file.Get(token[0]):
                    output_file.mkdir(token[0])
                output_file.cd(token[0])
                destination_name = token[1]
                print " TH1F(\"%s\", \"\", %d, %g, %g) -> %s %s" % \
                    (destination_name, len(bins), min, max,
                     output.attributes['path'], destination)
                histogram = ROOT.TH1F(destination_name, "", len(bins), min, max)
                for i, bin in enumerate(bins):
                    histogram.SetBinContent(i + 1, float(bin.content))
                histogram.Write()

            for duplicate in output.get("duplicate"):
                output_file.cd()
                source = duplicate.get("source")[0].content
                destination = duplicate.get("destination")[0].content
                token = destination.rsplit("/", 1)
                if len(token) == 1:
                    token = "", token[0]
                if not output_file.Get(token[0]):
                    output_file.mkdir(token[0])
                output_file.cd(token[0])
                destination_name = token[1]
                if sf:
                    cut = duplicate.get("cut")[0].content
                    histogram_name = duplicate.get("histogram")[0].content
                    histogram = input_file.getHistogram(
                        ROOT.TString(source),
                        ROOT.TString("%s/%s" % (cut, histogram_name)))
                    if not histogram:
                        continue
                    print " %s %s/%s -> %s %s" % \
                        (source, cut, histogram_name,
                         output.attributes['path'], destination)
                    clone = histogram.Clone(destination_name)
                    clone.Write()
                # todo

            output_file.Close()
def main(model="sm", mass0=-1, mass1=-1):

    # Print the model name and mass points
    print model, mass0, mass1

    # Suffix that will be attached to output file names for bookkeeping
    suffix = make_suffix(model, mass0, mass1)

    # Create the directory where the outputs will be stored
    makedir("statinputs")

    # Open input files
    filename = "output_sf_applied.root"
    samples = TQSampleFolder.loadSampleFolder("{}:samples".format(filename))
    samples_jec_up = TQSampleFolder.loadSampleFolder("output_jec_up.root:samples")
    samples_jec_dn = TQSampleFolder.loadSampleFolder("output_jec_dn.root:samples")
    samples_gen_met = TQSampleFolder.loadSampleFolder("output_gen_met.root:samples")

    # Set the histogram name to perform the fit on (we use the 9-bin histogram)
    histname = "{SRSSeeFull,SRSSemFull,SRSSmmFull,SideSSeeFull,SideSSemFull,SideSSmmFull,SR0SFOSFull,SR1SFOSFull,SR2SFOSFull}"

    # We have 8 processes entering the fit
    processes = [
        "vbsww",
        "ttw",
        "lostlep",
        "photon",
        "qflip",
        "prompt",
        "fake",
        "www",
    ]

    #######
    # NOTE: "www" means "signal", i.e. for the whsusy model www = whsusy, and www is included in prompt
    #######

    # Dictionary of sample folder paths from which the histograms are retrieved
    sampledirpaths = {
        "vbsww"   : "/typebkg/?/VBSWW",
        "ttw"     : "/typebkg/?/ttW",
        "lostlep" : "/typebkg/lostlep/[ttZ+WZ+Other]",
        "photon"  : "/typebkg/photon/[ttZ+WZ+Other]",
        "qflip"   : "/typebkg/qflip/[ttZ+WZ+Other]",
        "prompt"  : "/typebkg/prompt/[ttZ+WZ+Other]" if model == "sm" else "/typebkg/prompt/[ttZ+WZ+Other]+sig" if model != "whwww" else "/typebkg/prompt/[ttZ+WZ+Other]+sig/www",
        "fake"    : "/fake",
        "www"     : get_sigmodel_path(model, mass0, mass1),
    }

    # Create output file
    ofile = ROOT.TFile("statinputs/hist_{}.root".format(suffix), "recreate")
    ofile.cd()

    # Dictionary where we aggregate the total yields used in the datacard rate line
    rates = {}

    # Write histograms
    for process in processes:

        #print process, histname

        # Get nominal histogram
        h_nom = samples.getHistogram(sampledirpaths[process], histname).Clone(process)

        # For lost lepton, take the nominal numbers directly from AN Table 13
        if process == "lostlep":
            h_nom = set_to_lostlep_nominal_hist(h_nom)

        # If whsusy model with signal, take the average of the two histograms
        #if model == "whsusy" and process == "www": set_to_average_and_write_genmet_syst_hist(h_nom, samples_gen_met.getHistogram(sampledirpaths[process], histname).Clone(process))

        # Write nominal histogram
        #h_nom.Write()
        mask_bins(h_nom).Write()

        # Save the total yield that will be written to the datacard
        rates[process] = h_nom.Integral()

        # The nominal histogram bin errors are varied one by one to create effectively uncorrelated statistical variation histograms
        if process != "lostlep" and process != "fake":
            write_nominal_stat_variations(h_nom, process)

        # Write systematic histograms that come from weight variations
        for systvar in systvars:

            # Some processes or variations do not need to be written
            if do_not_write_syst_hist(process, systvar, model):
                continue

            # Write the systematic variation histograms
            #samples.getHistogram(sampledirpaths[process], histname.replace("Full", "Full" + systvar)).Clone(process + "_" + systvar).Write()
            mask_bins(samples.getHistogram(sampledirpaths[process], histname.replace("Full", "Full" + systvar)).Clone(process + "_" + systvar)).Write()

        # JEC systematic histograms need to be retrieved from a different sample folder output
        if process != "fake" and process != "lostlep":
            #samples_jec_up.getHistogram(sampledirpaths[process], histname).Clone(process + "_JECUp").Write()
            #samples_jec_dn.getHistogram(sampledirpaths[process], histname).Clone(process + "_JECDown").Write()
            mask_bins(samples_jec_up.getHistogram(sampledirpaths[process], histname).Clone(process + "_JECUp")).Write()
            mask_bins(samples_jec_dn.getHistogram(sampledirpaths[process], histname).Clone(process + "_JECDown")).Write()

        # Lost lepton has special treatment
        if process == "lostlep":
            #write_lostlep_stat_variations(h_nom)
            #write_lostlep_syst_variations(h_nom)
            write_lostlep_CRstat_variations(h_nom)
            write_lostlep_TFstat_variations(h_nom)
            write_lostlep_TFsyst_variations(h_nom)
            write_lostlep_Mjjsyst_variations(h_nom)
            write_lostlep_MllSSsyst_variations(h_nom)
            write_lostlep_Mll3lsyst_variations(h_nom)

        # WWW signal theory systematics
        if model == "sm":
            if process == "www":
                write_www_theory_syst_variations(h_nom)

        # Fake estimate has AR (application region) statistical variations
        if process == "fake":
            write_fake_ARstat_variations(h_nom)

    # Write data histogram
    h_data = samples.getHistogram("/typebkg", histname).Clone("data_obs")
    for i in xrange(0, h_data.GetNbinsX() + 2):
        h_data.SetBinContent(i, int(h_data.GetBinContent(i)))
    #h_data.Write()
    mask_bins(h_data).Write()

    datacard = """imax 1 number of bins
jmax * number of processes
kmax * number of nuisance parameters
----------------------------------------------------------------------------------------------------------------------------------
shapes * * statinputs/hist_{}.root $PROCESS $PROCESS_$SYSTEMATIC
----------------------------------------------------------------------------------------------------------------------------------
bin                   SR
observation           {:.1f}
----------------------------------------------------------------------------------------------------------------------------------
bin                          SR       SR       SR       SR       SR       SR       SR       SR
process                      0        1        2        3        4        5        6        7
process                      www      fake     photon   lostlep  qflip    prompt   ttw      vbsww
rate                         {:<6.3f}  {:<6.3f}  {:<6.3f}  {:<6.3f}  {:<6.3f}  {:<6.3f}  {:<6.3f}  {:<6.3f}
----------------------------------------------------------------------------------------------------------------------------------
JEC                   shape  1     -     1     -     -     1     1     1
LepSF                 shape  1     -     1     -     -     1     1     1
TrigSF                shape  1     -     1     -     -     1     1     1
BTagHF                shape  1     -     1     -     -     1     1     1
BTagLF                shape  1     -     1     -     -     1     1     1
Pileup                shape  1     -     1     -     -     1     1     1
FakeRateEl            shape  -     1     -     -     -     -     -     -
FakeRateMu            shape  -     1     -     -     -     -     -     -
FakeClosureEl         shape  -     1     -     -     -     -     -     -
FakeClosureMu         shape  -     1     -     -     -     -     -     -
LostLepSyst           shape  -     -     -     1     -     -     -     -
MjjModeling           shape  -     -     -     1     -     -     -     -
MllSSModeling         shape  -     -     -     1     -     -     -     -
Mll3lModeling         shape  -     -     -     1     -     -     -     -
SigXSec               lnN    1.06  -     -     -     -     -     -     -
LumSyst               lnN    1.025 -     1.025 -     1.025 1.025 1.025 1.025
vbsww_xsec            lnN    -     -     -     -     -     -     -     1.20
vbsww_validation      lnN    -     -     -     -     -     -     -     1.22
ttw_xsec              lnN    -     -     -     -     -     -     1.20  -
ttw_validation        lnN    -     -     -     -     -     -     1.18  -
photon_syst           lnN    -     -     1.50  -     -     -     -     -
qflip_syst            lnN    -     -     -     -     1.50  -     -     -
www_stat_in_ee        shape  1     -     -     -     -     -     -     -
www_stat_in_em        shape  1     -     -     -     -     -     -     -
www_stat_in_mm        shape  1     -     -     -     -     -     -     -
www_stat_out_ee       shape  1     -     -     -     -     -     -     -
www_stat_out_em       shape  1     -     -     -     -     -     -     -
www_stat_out_mm       shape  1     -     -     -     -     -     -     -
www_stat_0sfos        shape  1     -     -     -     -     -     -     -
www_stat_1sfos        shape  1     -     -     -     -     -     -     -
www_stat_2sfos        shape  1     -     -     -     -     -     -     -
fake_ARstat_in_ee     shape  -     1     -     -     -     -     -     -
fake_ARstat_in_em     shape  -     1     -     -     -     -     -     -
fake_ARstat_in_mm     shape  -     1     -     -     -     -     -     -
fake_ARstat_out_ee    shape  -     1     -     -     -     -     -     -
fake_ARstat_out_em    shape  -     1     -     -     -     -     -     -
fake_ARstat_out_mm    shape  -     1     -     -     -     -     -     -
fake_ARstat_0sfos     shape  -     1     -     -     -     -     -     -
fake_ARstat_1sfos     shape  -     1     -     -     -     -     -     -
fake_ARstat_2sfos     shape  -     1     -     -     -     -     -     -
photon_stat_in_ee     shape  -     -     1     -     -     -     -     -
photon_stat_in_em     shape  -     -     1     -     -     -     -     -
photon_stat_in_mm     shape  -     -     1     -     -     -     -     -
photon_stat_out_ee    shape  -     -     1     -     -     -     -     -
photon_stat_out_em    shape  -     -     1     -     -     -     -     -
photon_stat_out_mm    shape  -     -     1     -     -     -     -     -
photon_stat_0sfos     shape  -     -     1     -     -     -     -     -
photon_stat_1sfos     shape  -     -     1     -     -     -     -     -
photon_stat_2sfos     shape  -     -     1     -     -     -     -     -
lostlep_stat_in_ee    shape  -     -     -     1     -     -     -     -
lostlep_stat_in_em    shape  -     -     -     1     -     -     -     -
lostlep_stat_in_mm    shape  -     -     -     1     -     -     -     -
lostlep_stat_out_ee   shape  -     -     -     1     -     -     -     -
lostlep_stat_out_em   shape  -     -     -     1     -     -     -     -
lostlep_stat_out_mm   shape  -     -     -     1     -     -     -     -
lostlep_stat_0sfos    shape  -     -     -     1     -     -     -     -
lostlep_stat_1sfos    shape  -     -     -     1     -     -     -     -
lostlep_stat_2sfos    shape  -     -     -     1     -     -     -     -
qflip_stat_in_ee      shape  -     -     -     -     1     -     -     -
qflip_stat_in_em      shape  -     -     -     -     1     -     -     -
qflip_stat_in_mm      shape  -     -     -     -     1     -     -     -
qflip_stat_out_ee     shape  -     -     -     -     1     -     -     -
qflip_stat_out_em     shape  -     -     -     -     1     -     -     -
qflip_stat_out_mm     shape  -     -     -     -     1     -     -     -
qflip_stat_0sfos      shape  -     -     -     -     1     -     -     -
qflip_stat_1sfos      shape  -     -     -     -     1     -     -     -
qflip_stat_2sfos      shape  -     -     -     -     1     -     -     -
prompt_stat_in_ee     shape  -     -     -     -     -     1     -     -
prompt_stat_in_em     shape  -     -     -     -     -     1     -     -
prompt_stat_in_mm     shape  -     -     -     -     -     1     -     -
prompt_stat_out_ee    shape  -     -     -     -     -     1     -     -
prompt_stat_out_em    shape  -     -     -     -     -     1     -     -
prompt_stat_out_mm    shape  -     -     -     -     -     1     -     -
prompt_stat_0sfos     shape  -     -     -     -     -     1     -     -
prompt_stat_1sfos     shape  -     -     -     -     -     1     -     -
prompt_stat_2sfos     shape  -     -     -     -     -     1     -     -
ttw_stat_in_ee        shape  -     -     -     -     -     -     1     -
ttw_stat_in_em        shape  -     -     -     -     -     -     1     -
ttw_stat_in_mm        shape  -     -     -     -     -     -     1     -
ttw_stat_out_ee       shape  -     -     -     -     -     -     1     -
ttw_stat_out_em       shape  -     -     -     -     -     -     1     -
ttw_stat_out_mm       shape  -     -     -     -     -     -     1     -
ttw_stat_0sfos        shape  -     -     -     -     -     -     1     -
ttw_stat_1sfos        shape  -     -     -     -     -     -     1     -
ttw_stat_2sfos        shape  -     -     -     -     -     -     1     -
vbsww_stat_in_ee      shape  -     -     -     -     -     -     -     1
vbsww_stat_in_em      shape  -     -     -     -     -     -     -     1
vbsww_stat_in_mm      shape  -     -     -     -     -     -     -     1
vbsww_stat_out_ee     shape  -     -     -     -     -     -     -     1
vbsww_stat_out_em     shape  -     -     -     -     -     -     -     1
vbsww_stat_out_mm     shape  -     -     -     -     -     -     -     1
vbsww_stat_0sfos      shape  -     -     -     -     -     -     -     1
vbsww_stat_1sfos      shape  -     -     -     -     -     -     -     1
vbsww_stat_2sfos      shape  -     -     -     -     -     -     -     1
lostlep_CRstat_ee     shape  -     -     -     1     -     -     -     -
lostlep_CRstat_em     shape  -     -     -     1     -     -     -     -
lostlep_CRstat_mm     shape  -     -     -     1     -     -     -     -
lostlep_CRstat_0sfos  shape  -     -     -     1     -     -     -     -
lostlep_CRstat_1sfos  shape  -     -     -     1     -     -     -     -
lostlep_CRstat_2sfos  shape  -     -     -     1     -     -     -     -
""".format(suffix, h_data.Integral(), rates["www"], rates["fake"], rates["photon"], rates["lostlep"], rates["qflip"], rates["prompt"], rates["ttw"], rates["vbsww"])

    if model == "sm":
        datacard += """SigPDF                shape  1     -     -     -     -     -     -     -
SigQsq                shape  1     -     -     -     -     -     -     -
SigAlpha              shape  1     -     -     -     -     -     -     -
"""

    if model == "whsusy":
        datacard += """ISR                   shape  1     -     -     -     -     -     -     -
Qsq                   shape  1     -     -     -     -     -     -     -
PDF                   shape  1     -     -     -     -     -     -     -
AlphaS                shape  1     -     -     -     -     -     -     -
GenMET                shape  1     -     -     -     -     -     -     -
"""

    f = open('statinputs/datacard_{}.txt'.format(suffix), 'w')
    f.write(datacard)
    f.close()

    ofile.Close()
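# A minimal driver sketch for producing datacards with the function above.
# This block is not part of the original script: the choice of models and the
# example mass point values are illustrative assumptions only; make_suffix() and
# get_sigmodel_path() determine which (model, mass0, mass1) combinations are meaningful.
if __name__ == "__main__":
    # Standard-model WWW datacard
    main(model="sm")
    # Example WH-SUSY scan point (hypothetical mass values)
    main(model="whsusy", mass0=225, mass1=75)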