def __init__(self):
    # we can use a large number of bins and rebin later
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    pt_axis = hist.Bin("pt", r"$p_{T}$ (GeV)", 600, 0, 1000)
    eta_axis = hist.Bin("eta", r"$\eta$", 60, -5.5, 5.5)
    multiplicity_axis = hist.Bin("multiplicity", r"N", 20, -0.5, 19.5)

    self._accumulator = processor.dict_accumulator({
        "MET_pt": hist.Hist("Counts", dataset_axis, pt_axis),
        "Jet_pt": hist.Hist("Counts", dataset_axis, pt_axis),
        "Jet_pt_fwd": hist.Hist("Counts", dataset_axis, pt_axis),
        "Jet_eta": hist.Hist("Counts", dataset_axis, eta_axis),
        "GenJet_pt_fwd": hist.Hist("Counts", dataset_axis, pt_axis),
        "Spectator_pt": hist.Hist("Counts", dataset_axis, pt_axis),
        "Spectator_eta": hist.Hist("Counts", dataset_axis, eta_axis),
        "W_pt_notFromTop": hist.Hist("Counts", dataset_axis, pt_axis),
        "Top_pt": hist.Hist("Counts", dataset_axis, pt_axis),
        "Top_eta": hist.Hist("Counts", dataset_axis, eta_axis),
        "Antitop_pt": hist.Hist("Counts", dataset_axis, pt_axis),
        "Antitop_eta": hist.Hist("Counts", dataset_axis, eta_axis),
        "W_pt": hist.Hist("Counts", dataset_axis, pt_axis),
        "W_eta": hist.Hist("Counts", dataset_axis, eta_axis),
        "N_b": hist.Hist("Counts", dataset_axis, multiplicity_axis),
        "N_jet": hist.Hist("Counts", dataset_axis, multiplicity_axis),
        'cutflow_bkg': processor.defaultdict_accumulator(int),
        'cutflow_signal': processor.defaultdict_accumulator(int),
    })
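# Hedged sketch of the "rebin later" idea mentioned in the comment above: a finely
# binned coffea histogram can be coarsened after filling by mapping the old axis onto
# a new Bin definition whose edges align with the fine ones. The example histogram,
# dataset name, and binning below are illustrative assumptions, not part of the snippet.
import numpy as np
from coffea import hist

pt_fine = hist.Bin("pt", r"$p_{T}$ (GeV)", 600, 0, 1000)
h = hist.Hist("Counts", hist.Cat("dataset", "Primary dataset"), pt_fine)
h.fill(dataset="ttbar", pt=np.array([35.0, 120.0, 350.0]))

# merge the 600 fine bins into 60 coarser ones over the same range (edges coincide)
pt_coarse = hist.Bin("pt", r"$p_{T}$ (GeV)", 60, 0, 1000)
h_coarse = h.rebin("pt", pt_coarse)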
def __init__(self):
    ## make binning for hists
    self.dataset_axis = hist.Cat("dataset", "Event Process")
    self.pu_nTrueInt_axis = hist.Bin("pu_nTrueInt", "nTrueInt", 100, 0, 100)
    self.pu_nPU_axis = hist.Bin("pu_nPU", "nPU", 100, 0, 100)

    ## make dictionary of hists
    histo_dict = {}
    histo_dict['PU_nTrueInt'] = hist.Hist("PU_nTrueInt", self.dataset_axis, self.pu_nTrueInt_axis)
    histo_dict['PU_nPU'] = hist.Hist("PU_nPU", self.dataset_axis, self.pu_nPU_axis)

    #set_trace()
    ## construct dictionary of dictionaries to hold meta info for each sample
    for sample in fileset.keys():
        if 'Int' in sample:
            histo_dict['%s_pos' % sample] = processor.defaultdict_accumulator(int)
            histo_dict['%s_pos_runs_to_lumis' % sample] = processor.value_accumulator(list)
            histo_dict['%s_neg' % sample] = processor.defaultdict_accumulator(int)
            histo_dict['%s_neg_runs_to_lumis' % sample] = processor.value_accumulator(list)
        else:
            histo_dict[sample] = processor.defaultdict_accumulator(int)
            histo_dict['%s_runs_to_lumis' % sample] = processor.value_accumulator(list)

    self._accumulator = processor.dict_accumulator(histo_dict)
    self.sample_name = ''
def __init__(self, year='2018', corrections={}):
    self._year = year
    self._corrections = corrections
    self._rochester = lookup_tools.rochester_lookup.rochester_lookup(
        corrections['rochester_data'])

    dataset_axis = hist.Cat("dataset", "Primary dataset")
    channel_axis = hist.Cat("channel", "Channel")
    zmass_axis = hist.Bin("mass", r"$m_{2\ell}$ [GeV]", 240, 0, 120)
    met_axis = hist.Bin("met", r"$E_{T}^{miss}$ [GeV]", 3000, 0, 3000)
    npvs_axis = hist.Bin("npvs", "Number of Vertices", 120, 0, 120)

    self._selections = ['massWindow']

    hist.Hist.DEFAULT_DTYPE = 'f'  # save some space by keeping float bin counts instead of double

    self._accumulator = processor.dict_accumulator()
    for sel in self._selections:
        self._accumulator[sel + '_zmass'] = hist.Hist(
            "Counts", dataset_axis, channel_axis, zmass_axis)
        self._accumulator[sel + '_met'] = hist.Hist(
            "Counts", dataset_axis, channel_axis, met_axis)
        self._accumulator[sel + '_pileup'] = hist.Hist(
            "Counts", dataset_axis, channel_axis, npvs_axis)

    self._accumulator['cutflow'] = processor.defaultdict_accumulator(int)
    self._accumulator['sumw'] = processor.defaultdict_accumulator(int)
def __init__(self):
    # dataset_axis = hist.Cat("dataset", "Primary dataset")
    # pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 40, 0, 3500)
    ntrack_axis = hist.Bin("ntracks", "Number of Tracks", 20, 0.0, 500.0)
    njet_axis = hist.Bin("njets", "Number of Jets", 20, 0.0, 20.0)
    ht_axis = hist.Bin("ht", "H_{T} (GeV)", 500, 0.0, 5000.0)
    st_axis = hist.Bin("st", "S_{T} (GeV)", 500, 0.0, 5000.0)
    met_axis = hist.Bin("MET", "E_{T}^{miss} (GeV)", 200, 0.0, 2000.0)

    self._accumulator = processor.dict_accumulator({
        # 'jtpt': hist.Hist("Counts", dataset_axis, pt_axis),
        # 'jteta': hist.Hist("Counts", dataset_axis, eta_axis),
        'h_ntracks': hist.Hist("h_ntracks", ntrack_axis),
        'h_njets': hist.Hist("h_njets", njet_axis),
        'h_ht': hist.Hist("h_ht", ht_axis),
        'h_st': hist.Hist("h_st", st_axis),
        'h_met': hist.Hist("h_met", met_axis),
        'cutflow': processor.defaultdict_accumulator(int),
        'trigger': processor.defaultdict_accumulator(int),
    })
def __init__(self):
    self._acc = dict_accumulator()
    self._acc['sumw'] = processor.defaultdict_accumulator(float)
    self._acc['sumw_scale'] = processor.defaultdict_accumulator(empty_array_100)
    self._acc['sumw_pdf'] = processor.defaultdict_accumulator(empty_array_100)
def __init__(self):
    ## load b-tag SFs
    #self.btag_sf = BTagScaleFactor(os.path.expandvars("$TWHOME/data/DeepCSV_102XSF_V1.btag.csv.gz"), "reshape")

    # we can use a large number of bins and rebin later
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    pt_axis = hist.Bin("pt", r"$p_{T}$ (GeV)", 1000, 0, 1000)

    self._accumulator = processor.dict_accumulator({
        'diboson': processor.defaultdict_accumulator(int),
        'ttbar': processor.defaultdict_accumulator(int),
        'TTW': processor.defaultdict_accumulator(int),
        'TTZ': processor.defaultdict_accumulator(int),
        'TTH': processor.defaultdict_accumulator(int),
        'TTTT': processor.defaultdict_accumulator(int),
        'tW_scattering': processor.defaultdict_accumulator(int),
        'DY': processor.defaultdict_accumulator(int),
        'totalEvents': processor.defaultdict_accumulator(int),
        'passedEvents': processor.defaultdict_accumulator(int),
    })
def get_accumulator():
    dataset_ax = Cat("dataset", "Primary dataset")
    region_ax = Cat("region", "Selection region")

    mjj_ax = Bin("mjj", r"$M_{jj}$ (GeV)", 150, 0, 7500)
    jet_pt_ax = Bin("jetpt", r"$p_{T}$ (GeV)", 100, 0, 1000)
    jet_eta_ax = Bin("jeteta", r"$\eta$", 50, -5, 5)
    jet_phi_ax = Bin("jetphi", r"$\phi$", 50, -np.pi, np.pi)
    ht_ax = Bin("ht", r"$H_{T}$ (GeV)", 100, 0, 4000)

    items = {}
    items['sumw'] = processor.defaultdict_accumulator(float)
    items['sumw2'] = processor.defaultdict_accumulator(float)

    items["ak4_pt0"] = Hist("Counts", dataset_ax, region_ax, jet_pt_ax)
    items["ak4_eta0"] = Hist("Counts", dataset_ax, region_ax, jet_eta_ax)
    items["ak4_phi0"] = Hist("Counts", dataset_ax, region_ax, jet_phi_ax)
    items["ak4_pt1"] = Hist("Counts", dataset_ax, region_ax, jet_pt_ax)
    items["ak4_eta1"] = Hist("Counts", dataset_ax, region_ax, jet_eta_ax)
    items["ak4_phi1"] = Hist("Counts", dataset_ax, region_ax, jet_phi_ax)

    items["mjj"] = Hist("Counts", dataset_ax, region_ax, mjj_ax)
    items["ht"] = Hist("Counts", dataset_ax, region_ax, ht_ax)
    items["htmiss"] = Hist("Counts", dataset_ax, region_ax, ht_ax)

    return processor.dict_accumulator(items)
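# A minimal, hypothetical sketch of how an accumulator like the one returned by
# get_accumulator() is typically filled inside a coffea processor's process() method.
# It assumes get_accumulator() is importable in this module; the dataset name, region
# label, and kinematic arrays below are placeholders, not taken from the snippet above.
import numpy as np
from coffea import processor


class ExampleProcessor(processor.ProcessorABC):
    def __init__(self):
        self._accumulator = get_accumulator()

    @property
    def accumulator(self):
        return self._accumulator

    def process(self, events):
        out = self.accumulator.identity()
        dataset = events.metadata['dataset']
        # placeholder kinematics; a real analysis would compute these from events
        mjj = np.array([450.0, 1200.0, 2500.0])
        weight = np.ones_like(mjj)
        out['sumw'][dataset] += weight.sum()
        out['mjj'].fill(dataset=dataset, region='inclusive', mjj=mjj, weight=weight)
        return out

    def postprocess(self, accumulator):
        return accumulator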
def __init__(self):
    # Histograms
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    selection_axis = hist.Cat("selection", "Selection name")

    self._accumulator = processor.dict_accumulator()
    self._accumulator["nevents"] = processor.defaultdict_accumulator(int)
    self._accumulator["run_counter"] = processor.defaultdict_accumulator(
        partial(processor.defaultdict_accumulator, int))
def test_accumulators():
    a = processor.value_accumulator(float)
    a += 3.0
    assert a.value == 3.0
    assert a.identity().value == 0.0

    a = processor.value_accumulator(partial(np.array, [2.0]))
    a += 3.0
    assert np.array_equal(a.value, np.array([5.0]))
    assert np.array_equal(a.identity().value, np.array([2.0]))

    lacc = processor.list_accumulator(range(4))
    lacc += [3]
    lacc += processor.list_accumulator([1, 2])
    assert lacc == [0, 1, 2, 3, 3, 1, 2]

    b = processor.set_accumulator({"apples", "oranges"})
    b += {"pears"}
    b += "grapes"
    assert b == {"apples", "oranges", "pears", "grapes"}

    c = processor.dict_accumulator({"num": a, "fruit": b})
    c["num"] += 2.0
    c += processor.dict_accumulator({
        "num2": processor.value_accumulator(int),
        "fruit": processor.set_accumulator({"apples", "cherries"}),
    })
    assert c["num2"].value == 0
    assert np.array_equal(c["num"].value, np.array([7.0]))
    assert c["fruit"] == {"apples", "oranges", "pears", "grapes", "cherries"}

    d = processor.defaultdict_accumulator(float)
    d["x"] = 0.0
    d["x"] += 4.0
    d["y"] += 5.0
    d["z"] += d["x"]
    d["x"] += d["y"]
    assert d["x"] == 9.0
    assert d["y"] == 5.0
    assert d["z"] == 4.0
    assert d["w"] == 0.0

    f = processor.defaultdict_accumulator(lambda: 2.0)
    f["x"] += 4.0
    assert f["x"] == 6.0
    f += f
    assert f["x"] == 12.0
    assert f["y"] == 2.0

    a = processor.column_accumulator(np.arange(6).reshape(2, 3))
    b = processor.column_accumulator(np.arange(12).reshape(4, 3))
    a += b
    assert a.value.sum() == 81
def test_accumulators():
    a = processor.value_accumulator(float)
    a += 3.
    assert a.value == 3.
    assert a.identity().value == 0.

    a = processor.value_accumulator(partial(np.array, [2.]))
    a += 3.
    assert np.array_equal(a.value, np.array([5.]))
    assert np.array_equal(a.identity().value, np.array([2.]))

    lacc = processor.list_accumulator(range(4))
    lacc += [3]
    lacc += processor.list_accumulator([1, 2])
    assert lacc == [0, 1, 2, 3, 3, 1, 2]

    b = processor.set_accumulator({'apples', 'oranges'})
    b += {'pears'}
    b += 'grapes'
    assert b == {'apples', 'oranges', 'pears', 'grapes'}

    c = processor.dict_accumulator({'num': a, 'fruit': b})
    c['num'] += 2.
    c += processor.dict_accumulator({
        'num2': processor.value_accumulator(int),
        'fruit': processor.set_accumulator({'apples', 'cherries'}),
    })
    assert c['num2'].value == 0
    assert np.array_equal(c['num'].value, np.array([7.]))
    assert c['fruit'] == {'apples', 'oranges', 'pears', 'grapes', 'cherries'}

    d = processor.defaultdict_accumulator(float)
    d['x'] = 0.
    d['x'] += 4.
    d['y'] += 5.
    d['z'] += d['x']
    d['x'] += d['y']
    assert d['x'] == 9.
    assert d['y'] == 5.
    assert d['z'] == 4.
    assert d['w'] == 0.

    e = d + c

    f = processor.defaultdict_accumulator(lambda: 2.)
    f['x'] += 4.
    assert f['x'] == 6.
    f += f
    assert f['x'] == 12.
    assert f['y'] == 2.

    a = processor.column_accumulator(np.arange(6).reshape(2, 3))
    b = processor.column_accumulator(np.arange(12).reshape(4, 3))
    a += b
    assert a.value.sum() == 81
def __init__(self, year='2017'):
    self._year = year
    self._triggers = {
        '2017': [
            'PFHT1050',
            'AK8PFJet400_TrimMass30',
            'AK8PFJet420_TrimMass30',
            'AK8PFHT800_TrimMass50',
            'PFJet500',
            'AK8PFJet500',
            'AK8PFJet550',
            'CaloJet500_NoJetID',
            'CaloJet550_NoJetID',
        ]
    }
    self._muontriggers = {
        '2017': [
            'Mu50',
            #'TkMu50',
        ]
    }
    self._accumulator = processor.dict_accumulator({
        'sumw': processor.defaultdict_accumulator(float),
        'templates': hist.Hist(
            'Events',
            hist.Cat('dataset', 'Dataset'),
            hist.Cat('region', 'Region'),
            #hist.Cat('systematic', 'Systematic'),
            hist.Bin('pt', r'Jet $p_{T}$ [GeV]', 25, 500, 1000),  # alternatives: [525,575,625,700,800,1500] or np.arange(525,2000,50)
            hist.Bin('msd', r'Jet $m_{sd}$', 23, 40, 300),
            #hist.Bin('gru', 'GRU value', 20, 0., 1.),
            #hist.Bin('gruddt', 'GRU$^{DDT}$ value', [-1, 0, 1]),
            #hist.Bin('rho', 'jet rho', 20, -5.5, -2.),  # alternative: [-5.5,-5.,-4.5,-4.,-3.5,-3.,-2.5,-2.]
            #hist.Bin('n2', 'N$_2$ value', 20, 0., 0.5),
            #hist.Bin('n2ddt', 'N$_2^{DDT}$ value', 21, -0.3, 0.3),
            #hist.Bin('Vmatch', 'Matched to V', [-1, 0, 1]),
            hist.Bin('in_v3_ddt', 'IN$^{DDT}$ value', 20, -1, 0.5),
            hist.Bin('mu_pt', r'Leading muon $p_{T}$', 20, 50., 700.),
            hist.Bin('mu_pfRelIso04_all', 'Muon pfRelIso04 isolation', 20, 0., 1.),
            #hist.Bin('nPFConstituents', 'Number of PF candidates', 41, 20, 60),
            #hist.Bin('nJet', 'Number of fat jets', 10, 0, 9),
        ),
        #'gruddt': hist.Hist(
        #    hist.Cat('dataset', 'Dataset'),
        #    hist.Cat('region', 'Region'),
        #'cutflow': hist.Hist(
        #    'Events',
        #    hist.Cat('dataset', 'Dataset'),
        #    hist.Cat('region', 'Region'),
        #    hist.Bin('cut', 'Cut index', 11, 0, 11),
        #),
        'cutflow_signal': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
        'cutflow_ttbar_muoncontrol': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
    })
def test_accumulators():
    a = processor.accumulator(0.)
    a += 3.
    a += processor.accumulator(2)
    assert a.value == 5.
    assert a.identity().value == 0.

    a = processor.accumulator(np.array([0.]))
    a += 3.
    a += processor.accumulator(2)
    assert a.value == np.array([5.])
    assert a.identity().value == np.array([0.])

    b = processor.set_accumulator({'apples', 'oranges'})
    b += {'pears'}
    b += 'grapes'
    assert b == {'apples', 'oranges', 'pears', 'grapes'}

    c = processor.dict_accumulator({'num': a, 'fruit': b})
    c['num'] += 2.
    c += processor.dict_accumulator({
        'num2': processor.accumulator(0),
        'fruit': processor.set_accumulator({'apples', 'cherries'}),
    })
    assert c['num2'].value == 0
    assert c['num'].value == 7.
    assert c['fruit'] == {'apples', 'oranges', 'pears', 'grapes', 'cherries'}

    d = processor.defaultdict_accumulator(lambda: processor.accumulator(0.))
    d['x'] = processor.accumulator(0.)
    d['x'] += 4.
    d['y'] += 5.
    d['z'] += d['x']
    d['x'] += d['y']
    assert d['x'].value == 9.
    assert d['y'].value == 5.
    assert d['z'].value == 4.
    assert d['w'].value == 0.

    e = d + c

    f = processor.defaultdict_accumulator(lambda: 2.)
    f['x'] += 4.
    assert f['x'] == 6.
    f += f
    assert f['x'] == 12.
    assert f['y'] == 2.
def __init__(self, year):
    self._year = year
    self._trigger = {
        2016: {
            "e": [
                "Ele27_WPTight_Gsf",
                "Ele45_WPLoose_Gsf",
                "Ele25_eta2p1_WPTight_Gsf",
                "Ele115_CaloIdVT_GsfTrkIdT",
                "Ele15_IsoVVL_PFHT350",
                "Ele15_IsoVVVL_PFHT400",
                "Ele45_CaloIdVT_GsfTrkIdT_PFJet200_PFJet50",
                "Ele50_CaloIdVT_GsfTrkIdT_PFJet165",
            ],
            "mu": [
                "IsoMu24",
                "IsoTkMu24",
                "Mu50",
                "TkMu50",
                "Mu15_IsoVVVL_PFHT400",
                "Mu15_IsoVVVL_PFHT350",
            ],
        }
    }
    self._trigger = self._trigger[int(self._year)]

    self._accumulator = processor.dict_accumulator({
        'sumw': processor.defaultdict_accumulator(float),
        'cutflow': hist.Hist(
            'Events',
            hist.Cat('dataset', 'Dataset'),
            hist.Cat('channel', 'Channel'),
            hist.Bin('cut', 'Cut index', 9, 0, 9),
        ),
    })
def __init__(self): ## make binning for hists self.dataset_axis = hist.Cat("dataset", "Event Process") self.jetmult_axis = hist.Cat("jmult", "nJets") self.leptype_axis = hist.Cat("leptype", "Lepton Type") self.lepcat_axis = hist.Cat("lepcat", "Lepton Category") self.btagging_applied_axis = hist.Cat("btagging", "btagging applied") self.btag_axis = hist.Cat("btag", "btagging Category") self.pt_axis = hist.Bin("pt", "p_{T} [GeV]", 200, 0, 1000) self.njets_axis = hist.Bin("njets", "n_{jets}", 20, 0, 20) self.mt_axis = hist.Bin("mt", "M_{T}", 200, 0., 1000.) self.SF_axis = hist.Bin("sf", "SF", 500, 0., 5.) self.deepcsv_axis = hist.Bin("deepcsv", "SF", 100, 0., 1.) self.pu_axis = hist.Bin("pu", "nTrueInt", 100, 0., 100.) self.rho_axis = hist.Bin("rho", "Rho", 100, 0., 100.) self.vtx_axis = hist.Bin("vtx", "num vertices", 100, 0., 100.) ## make dictionary of hists histo_dict = {} ## make jet hists hists = self.make_hists() histo_dict.update(hists) histo_dict['cutflow'] = processor.defaultdict_accumulator(int) self._accumulator = processor.dict_accumulator(histo_dict) self.sample_name = '' self.corrections = corrections self.isData = True
def __init__(self, isMC):
    self._isMC = isMC

    # Histograms
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    selection_axis = hist.Cat("selection", "Selection name")

    self._accumulator = processor.dict_accumulator()
    self._accumulator["total_events"] = processor.defaultdict_accumulator(int)

    # Define histograms here
    self._accumulator["mjjj"] = hist.Hist(
        "Events", dataset_axis, selection_axis,
        hist.Bin("mjjj", r"$M_{jjj}$ [GeV]", dijet_binning),
    )

    for pair in [(0, 1), (1, 2), (2, 0)]:
        self._accumulator[f"m{pair[0]}{pair[1]}"] = hist.Hist(
            "Events", dataset_axis, selection_axis,
            hist.Bin(f"m{pair[0]}{pair[1]}", f"$m_{{{pair[0]}{pair[1]}}}$ [GeV]", dijet_binning))
        self._accumulator[f"dR{pair[0]}{pair[1]}"] = hist.Hist(
            "Events", dataset_axis, selection_axis,
            hist.Bin(f"dR{pair[0]}{pair[1]}", f"$\\Delta R_{{{pair[0]}{pair[1]}}}$", 100, 0., 4))
        self._accumulator[f"dEta{pair[0]}{pair[1]}"] = hist.Hist(
            "Events", dataset_axis, selection_axis,
            hist.Bin(f"dEta{pair[0]}{pair[1]}", f"$\\Delta \\eta_{{{pair[0]}{pair[1]}}}$", 100, 0., 2))
        self._accumulator[f"m{pair[0]}{pair[1]}overM"] = hist.Hist(
            "Events", dataset_axis, selection_axis,
            hist.Bin(f"m{pair[0]}{pair[1]}overM", f"$m_{{{pair[0]}{pair[1]}}}/M_{{jjj}}$", 100, 0, 1))

    for jet in [0, 1, 2]:
        self._accumulator[f"pt{jet}"] = hist.Hist(
            "Events", dataset_axis, selection_axis,
            hist.Bin(f"pt{jet}", f"$p^{{T}}_{{{jet}}}$ [GeV]", dijet_binning))
        self._accumulator[f"eta{jet}"] = hist.Hist(
            "Events", dataset_axis, selection_axis,
            hist.Bin(f"eta{jet}", f"$\\eta_{{{jet}}}$", 100, -3, 3))
        self._accumulator[f"ptoverM{jet}"] = hist.Hist(
            "Events", dataset_axis, selection_axis,
            hist.Bin(f"ptoverM{jet}", f"$p^{{T}}_{{{jet}}}/M_{{jjj}}$", 100, 0, 2.5))
def __init__(self, era=2018):
    datasets_axis = hist.Cat("dataset", "Signal Model")
    category_axis = hist.Cat("region", "Lepton category")
    sys_axis = hist.Cat("syst", "systematic variation")

    MT1_axis = hist.Bin("MT1", r"$M_{T,1}$ [GeV]", 500, 0, 2000)
    MT2_axis = hist.Bin("MT2", r"$M_{T,2}$ [GeV]", 500, 0, 2000)
    MT3_axis = hist.Bin("MT3", r"$M_{T,3}$ [GeV]", 500, 0, 2000)
    ST1_axis = hist.Bin("ST1", r"$S_{T,1}$ [GeV]", 500, 0, 2000)
    MET_axis = hist.Bin("MET", r"$E_{T}^{miss}$ [GeV]", 500, 0, 2000)
    RT1_axis = hist.Bin("RT1", r"$R_{T}$", 500, 0, 200)

    self._accumulator = processor.dict_accumulator({
        'MET': hist.Hist("Events", datasets_axis, category_axis, MET_axis),
        'MT1': hist.Hist("Events", datasets_axis, category_axis, MT1_axis),
        'MT2': hist.Hist("Events", datasets_axis, category_axis, MT2_axis),
        'MT3': hist.Hist("Events", datasets_axis, category_axis, MT3_axis),
        'RT1': hist.Hist("Events", datasets_axis, category_axis, RT1_axis),
        'cutflow': processor.defaultdict_accumulator(int),
    })

    with open(f"{os.path.dirname(__file__)}/xsections_{era}.yaml") as stream:
        self.xsections = yaml.safe_load(stream)

    self.lumi = {
        2016: 35.9,
        2017: 41.5,
        2018: 60.0,
    }[era]
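# Hedged sketch of how the xsections and lumi loaded in the constructor above are
# commonly used downstream: scale each MC dataset by cross section times integrated
# luminosity over the generated weight sum. The sumw mapping, the pb/fb^-1 unit
# convention, and the factor of 1000 are assumptions for illustration only.
def mc_scale_factors(xsections, lumi, sumw):
    """Per-dataset weights: xsec [pb] * lumi [fb^-1] * 1000 / sum of generator weights."""
    return {
        dataset: xsections[dataset] * lumi * 1000.0 / sumw[dataset]
        for dataset in sumw
        if dataset in xsections
    }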
def __init__(self): # Histogram setup dataset_ax = Cat("dataset", "Primary dataset") pdf_ax = Cat("pdf", "pdf") vpt_ax = Bin("vpt",r"$p_{T}^{V}$ (GeV)", 100, 0, 2000) sign_ax = Bin("sign",r"Sign", 2, -2, 2) items = {} items["gen_vpt"] = Hist("Counts", dataset_ax, pdf_ax, vpt_ax) items["gen_weight_sign"] = Hist("Counts", dataset_ax, sign_ax) items['sumw'] = processor.defaultdict_accumulator(float) items['sumw2'] = processor.defaultdict_accumulator(float) self._accumulator = processor.dict_accumulator(items) self._all_pdfs = [303600,263000,262000,306000]
def __init__(self, analyzer_name, analysis_type):
    self.analyzer_name = analyzer_name
    self.analysis_type = analysis_type

    self._accumulator = processor.dict_accumulator({
        'cutflow': processor.defaultdict_accumulator(int),
    })
def __init__(self): # Histogram setup dataset_ax = Cat("dataset", "Primary dataset") sieie_ax = Bin("sieie", r"sieie", 100, 0, 0.02) pt_ax = Bin("pt", r"pt", 50, 200, 1200) cat_ax = Cat("cat", r"cat") items = {} items[f"sieie"] = Hist("Counts", dataset_ax, sieie_ax, pt_ax, cat_ax) items['sumw'] = processor.defaultdict_accumulator(float) items['sumw2'] = processor.defaultdict_accumulator(float) self._accumulator = processor.dict_accumulator(items) self._configure()
def __init__(self): dataset_axis = hist.Cat("dataset", "Primary dataset") mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300) self._accumulator = processor.dict_accumulator({ 'mass': hist.Hist("Counts", dataset_axis, mass_axis), 'cutflow': processor.defaultdict_accumulator(int), })
def __init__(self): self._accumulator = processor.dict_accumulator({ "sumw": processor.defaultdict_accumulator(float), "nTracksHist": hist.Hist( "Events", hist.Cat("dataset", "Dataset"), hist.Bin("nTracks", "multiplicity", 50, 0, 250), ), })
def __init__(self): self._accumulator = processor.dict_accumulator({ "sumw": processor.defaultdict_accumulator(float), "mass": hist.Hist( "Events", hist.Cat("dataset", "Dataset"), hist.Bin("mass", "$m_{\mu\mu}$ [GeV]", 60, 60, 120), ), })
def __init__(self):
    # Bins and categories for the histogram are defined here. For the format, see
    # https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Hist.html and
    # https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Bin.html
    self._columns = ['MET_pt']
    dataset_axis = hist.Cat("dataset", "")
    MET_axis = hist.Bin("MET", "MET [GeV]", 50, 0, 100)

    # The accumulator keeps our data chunks together for histogramming.
    # It also gives us cutflow, which can be used to keep track of data.
    self._accumulator = processor.dict_accumulator({
        'MET': hist.Hist("Counts", dataset_axis, MET_axis),
        'cutflow': processor.defaultdict_accumulator(int),
    })
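# A hedged sketch of a process() method that could accompany the constructor above,
# in the column-reading style implied by self._columns: it fills the 'MET' histogram
# and the 'cutflow' counters. The 20 GeV threshold and the cutflow labels are purely
# illustrative assumptions, not taken from the snippet.
def process(self, df):
    output = self.accumulator.identity()
    dataset = df['dataset']
    met = df['MET_pt']

    output['cutflow']['all events'] += met.size
    selected = met[met > 20]  # illustrative threshold
    output['cutflow']['MET > 20 GeV'] += selected.size

    output['MET'].fill(dataset=dataset, MET=selected)
    return output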
def __init__(self, isMC): self._isMC = isMC # Histograms dataset_axis = hist.Cat("dataset", "Primary dataset") selection_axis = hist.Cat("selection", "Selection name") self._accumulator = processor.dict_accumulator() self._accumulator["total_events"] = processor.defaultdict_accumulator( int) for pt_cut in range(30, 1150, 5): self._accumulator[f"N_min_pT_cut{pt_cut}".format( pt_cut)] = processor.defaultdict_accumulator(int) for eta_cut in np.arange(0, 2.5, 0.05): self._accumulator[f"N_max_eta_cut{eta_cut}".format( eta_cut)] = processor.defaultdict_accumulator(int) for dEta_max_cut in np.arange(0, 5, 0.1): self._accumulator[f"N_dEta_jj_max_cut{dEta_max_cut}".format( dEta_max_cut)] = processor.defaultdict_accumulator(int) for dR_min_cut in np.arange(0, 5, 0.1): self._accumulator[f"N_dR_jj_min_cut{dR_min_cut}".format( dR_min_cut)] = processor.defaultdict_accumulator(int)
def __init__(self, isMC):
    self._isMC = isMC

    # Define accumulators here
    self._accumulator = processor.dict_accumulator()
    self._accumulator["total_events"] = processor.defaultdict_accumulator(int)
    self._accumulator["selected_events"] = processor.defaultdict_accumulator(int)

    for jet in [0, 1, 2]:
        self._accumulator[f"eta_{jet}_final"] = processor.dict_accumulator()
        self._accumulator[f"ptoverM_{jet}_final"] = processor.dict_accumulator()

    for pair in [(0, 1), (1, 2), (2, 0)]:
        self._accumulator[f"dEta_{pair[0]}{pair[1]}_final"] = processor.dict_accumulator()
        self._accumulator[f"dR_{pair[0]}{pair[1]}_final"] = processor.dict_accumulator()
        self._accumulator[f"moverM_{pair[0]}{pair[1]}_final"] = processor.dict_accumulator()

    for pair in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]:
        self._accumulator[f"dR_{pair[0]}_{pair[1]}{pair[2]}_final"] = processor.dict_accumulator()
        self._accumulator[f"dEta_{pair[0]}_{pair[1]}{pair[2]}_final"] = processor.dict_accumulator()
        self._accumulator[f"Phi_{pair[0]}_{pair[1]}{pair[2]}_final"] = processor.dict_accumulator()
        self._accumulator[f"dPtoverM_{pair[0]}_{pair[1]}{pair[2]}_final"] = processor.dict_accumulator()

    self._accumulator["ptoverM_max_final"] = processor.dict_accumulator()
    self._accumulator["ptoverM_min_final"] = processor.dict_accumulator()
    self._accumulator["eta_max_final"] = processor.dict_accumulator()
    self._accumulator["dR_max_final"] = processor.dict_accumulator()
    self._accumulator["dR_min_final"] = processor.dict_accumulator()
    self._accumulator["dEta_max_final"] = processor.dict_accumulator()
    self._accumulator["dEta_min_final"] = processor.dict_accumulator()
    self._accumulator["dR_j_jj_max_final"] = processor.dict_accumulator()
    self._accumulator["dR_j_jj_min_final"] = processor.dict_accumulator()
    self._accumulator["dEta_j_jj_max_final"] = processor.dict_accumulator()
    self._accumulator["dEta_j_jj_min_final"] = processor.dict_accumulator()
    self._accumulator["dPhi_j_jj_max_final"] = processor.dict_accumulator()
    self._accumulator["dPhi_j_jj_min_final"] = processor.dict_accumulator()
    self._accumulator["dPtoverM_j_jj_max_final"] = processor.dict_accumulator()
    self._accumulator["dPtoverM_j_jj_min_final"] = processor.dict_accumulator()
def __init__(self, columns=[]):
    self._columns = columns

    dataset_axis = hist.Cat("dataset", "Primary dataset")
    mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300)
    pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 30000, 0.25, 300)

    self._accumulator = processor.dict_accumulator({
        "mass": hist.Hist("Counts", dataset_axis, mass_axis),
        "pt": hist.Hist("Counts", dataset_axis, pt_axis),
        "cutflow": processor.defaultdict_accumulator(int),
    })
def __init__(self): # Histogram setup dataset_ax = Cat("dataset", "Primary dataset") vpt_ax = Bin("vpt", r"$p_{T}^{V}$ (GeV)", 50, 0, 2000) jpt_ax = Bin("jpt", r"$p_{T}^{j}$ (GeV)", 50, 0, 2000) mjj_ax = Bin("mjj", r"$m(jj)$ (GeV)", 75, 0, 7500) res_ax = Bin("res", r"pt: dressed / stat1 - 1", 80, -0.2, 0.2) items = {} for tag in ['stat1', 'dress', 'lhe']: items[f"gen_vpt_inclusive_{tag}"] = Hist("Counts", dataset_ax, vpt_ax) items[f"gen_vpt_monojet_{tag}"] = Hist("Counts", dataset_ax, jpt_ax, vpt_ax) items[f"gen_vpt_vbf_{tag}"] = Hist("Counts", dataset_ax, jpt_ax, mjj_ax, vpt_ax) items["resolution"] = Hist("Counts", dataset_ax, res_ax) items['sumw'] = processor.defaultdict_accumulator(float) items['sumw2'] = processor.defaultdict_accumulator(float) self._accumulator = processor.dict_accumulator(items)
def __init__(self):
    self._accumulator = processor.dict_accumulator({
        'sumw': processor.defaultdict_accumulator(float),
        'nevents': processor.defaultdict_accumulator(float),
        # column accumulators start from an empty (0, n_columns) array, one column per stored variable
        'variables': processor.defaultdict_accumulator(processor.column_accumulator(np.transpose(np.array([[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]))).identity),
        'variables_merged': processor.defaultdict_accumulator(processor.column_accumulator(np.transpose(np.array([[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]))).identity),
        'weights': processor.defaultdict_accumulator(processor.column_accumulator(np.array([])).identity),
        'weights_merged': processor.defaultdict_accumulator(processor.column_accumulator(np.array([])).identity),
    })
def __init__(self, year='2017'):
    self._year = year
    self._btagWPs = {
        'med': {
            '2016': 0.6321,
            '2017': 0.4941,
            '2018': 0.4184,
        },
    }
    self._triggers = {
        '2016': [
            "PFHT800",
            "PFHT900",
            "AK8PFJet360_TrimMass30",
            "AK8PFHT700_TrimR0p1PT0p03Mass50",
            "PFHT650_WideJetMJJ950DEtaJJ1p5",
            "PFHT650_WideJetMJJ900DEtaJJ1p5",
            "AK8DiPFJet280_200_TrimMass30_BTagCSV_p20",
            "PFJet450",
        ],
        '2017': [
            "AK8PFJet330_PFAK8BTagCSV_p17",
            "PFHT1050",
            "AK8PFJet400_TrimMass30",
            "AK8PFJet420_TrimMass30",
            "AK8PFHT800_TrimMass50",
            "PFJet500",
            "AK8PFJet500",
        ],
        '2018': [
            'AK8PFJet400_TrimMass30',
            'AK8PFJet420_TrimMass30',
            'AK8PFHT800_TrimMass50',
            'PFHT1050',
            'PFJet500',
            'AK8PFJet500',
            # 'AK8PFJet330_PFAK8BTagCSV_p17',  # not present in 2018D?
            'AK8PFJet330_TrimMass30_PFAK8BoostedDoubleB_np4',
        ],
    }

    self._accumulator = processor.dict_accumulator({
        'cutflow': processor.defaultdict_accumulator(float),
    })
def __init__(self, columns=[], canaries=[]):
    self._columns = columns
    self._canaries = canaries

    dataset_axis = hist.Cat("dataset", "Primary dataset")
    mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300)
    pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 30000, 0.25, 300)

    self._accumulator = processor.dict_accumulator({
        'mass': hist.Hist("Counts", dataset_axis, mass_axis),
        'pt': hist.Hist("Counts", dataset_axis, pt_axis),
        'cutflow': processor.defaultdict_accumulator(int),
        'worker': processor.set_accumulator(),
    })