def embed_crossref(source, idx_name, dest, dest_name):
    """Embed a cross-reference

    Parameters
    ----------
    source : ak.Array
        any array with shape N * var * {record}
    idx_name : str
        A field in the source record
    dest : ak.Array
        any array with shape N * var * {record}, where:
        ``ak.max(source[idx_name], axis=1) < ak.num(dest)`` and
        ``ak.min(source[idx_name], axis=1) >= 0``
    """
    # Validate the index invariants documented above before gathering:
    # an out-of-range index would silently pick up the wrong record.
    # (Leftover debug prints removed.)
    assert ak.all(ak.max(source[idx_name], axis=1) < ak.num(dest))
    assert ak.all(ak.min(source[idx_name], axis=1) >= 0)
    # Convert per-event local indices into global indices on the flattened dest
    # by adding each event's start offset.
    id_global = ak.flatten(
        source[idx_name] + np.asarray(dest.layout.starts), axis=None
    )
    # Rebuild the doubly-jagged structure of source around the gathered records.
    source[dest_name] = ak.Array(
        ak.layout.ListOffsetArray64(
            source.layout.offsets,
            ak.layout.ListOffsetArray64(
                source.layout.content.offsets,
                ak.flatten(dest)[id_global].layout,
            ),
        )
    )
def getNonPromptFromFlavour(obj, allow_tau=True):
    """Count, per event, the objects whose gen-match flavour is non-prompt.

    gamma* -> ll is always treated as prompt in NanoAOD.
    """
    if allow_tau:
        # genPartFlav == 15 (tau->enu / tau->munu) is treated as prompt here
        nonprompt = (obj.genPartFlav != 1) & (obj.genPartFlav != 15)
    else:
        nonprompt = (obj.genPartFlav != 1)
    return ak.num(obj[nonprompt])
def lct_check(csc_accept, csc_chamber, lct_chamber, alct_chamber, clct_chamber):
    """Count how often an accepted CSC chamber also appears in the LCT, ALCT
    and CLCT chamber lists of the same event.

    Returns [total_chambers, lct_eff, alct_eff, clct_eff], efficiencies in
    percent; [0, 0, 0, 0] when no event has any accepted CSC.
    """
    event_mask = np.array(ak.num(csc_accept, axis=1) > 0)
    if np.count_nonzero(event_mask) == 0:
        return [0, 0, 0, 0]

    csc_sel = csc_chamber[event_mask]
    lct_sel = lct_chamber[event_mask]
    alct_sel = alct_chamber[event_mask]
    clct_sel = clct_chamber[event_mask]

    lct_corr = 0
    alct_corr = 0
    clct_corr = 0
    # Walk the selected events in lockstep and test per-chamber membership.
    for csc_ev, lct_ev, alct_ev, clct_ev in zip(csc_sel, lct_sel, alct_sel, clct_sel):
        for chamber in csc_ev:
            if chamber in lct_ev:
                lct_corr += 1
            if chamber in alct_ev:
                alct_corr += 1
            if chamber in clct_ev:
                clct_corr += 1

    total_cham = ak.sum(ak.num(csc_sel))
    return [
        total_cham,
        lct_corr / total_cham * 100,
        alct_corr / total_cham * 100,
        clct_corr / total_cham * 100,
    ]
def test_ByteMaskedArray_num():
    # Doubly-jagged content; the byte mask flags entries 2 and 3 as missing
    # (valid_when=False means a mask byte of 1 marks the entry as None).
    content = awkward1.from_iter([[[0.0, 1.1, 2.2], [], [3.3, 4.4]], [], [[5.5]], [[6.6, 7.7, 8.8, 9.9]], [[], [10.0, 11.1, 12.2]]], highlevel=False)
    mask = awkward1.layout.Index8(numpy.array([0, 0, 1, 1, 0], dtype=numpy.int8))
    array = awkward1.Array(awkward1.layout.ByteMaskedArray(mask, content, valid_when=False))
    assert awkward1.to_list(array) == [[[0.0, 1.1, 2.2], [], [3.3, 4.4]], [], None, None, [[], [10.0, 11.1, 12.2]]]
    # axis=0 counts all outer entries, including the masked ones; at deeper
    # axes the masked entries propagate through as None.
    assert awkward1.num(array, axis=0) == 5
    assert awkward1.to_list(awkward1.num(array, axis=1)) == [3, 0, None, None, 2]
    assert awkward1.to_list(awkward1.num(array, axis=2)) == [[3, 0, 2], [], None, None, [0, 3]]
def get_rate(csc_accept, emtf_accept, gmt_accept):
    """Calculate the trigger rate for each accept collection.

    Returns [n_events, csc_rate, emtf_rate, gmt_rate]. Each rate is the
    fraction of events that fired, scaled by 30 * 1000 (presumably a
    collision-rate-to-kHz conversion -- confirm units with the caller).
    """
    n_acc = len(csc_accept)

    def fired_rate(per_event):
        # fraction of events with a nonzero entry, scaled to a rate
        return np.count_nonzero(per_event) / n_acc * 30 * 1000

    return [
        n_acc,
        fired_rate(ak.num(csc_accept, axis=1)),
        fired_rate(ak.num(emtf_accept, axis=1)),
        fired_rate(gmt_accept),
    ]
def debug_eff(llp_accept, csc_accept, emtf_accept, gmt_accept, csc_endcap, csc_station, csc_ring, csc_chamber, emtf_endcap, emtf_station):
    """Print debug details for events where the EMTF and GMT counts disagree,
    then compute the per-stage efficiencies.

    Returns
    -------
    list
        [n_acc, csc_eff, emtf_eff, gmt_eff] with efficiencies in percent.
    """
    # Create a mask for events in acceptance
    llp_pass = np.array(ak.sum(llp_accept, axis=-1) > 0)
    # Apply mask to the csc, emtf, and gmt collections
    csc_pass = csc_accept[llp_pass]
    emtf_pass = emtf_accept[llp_pass]
    gmt_pass = gmt_accept[llp_pass]
    # Flag events where EMTF candidates exceed 5x the GMT count
    # (NOTE(review): the factor 5 is not explained here -- confirm intent).
    mismatch = ak.num(emtf_pass) > ak.flatten(gmt_pass) * 5
    print("Mismatch:")
    print(mismatch)
    print('')
    csc_pass_endcap = csc_endcap[llp_pass]
    csc_pass_station = csc_station[llp_pass]
    csc_pass_ring = csc_ring[llp_pass]
    csc_pass_chamber = csc_chamber[llp_pass]
    # Dump CSC details for the mismatched events
    print("CSC Masked Array:")
    print(csc_pass[mismatch])
    print("CSC endcap:")
    print(csc_pass_endcap[mismatch])
    print("CSC station:")
    print(csc_pass_station[mismatch])
    print("CSC ring:")
    print(csc_pass_ring[mismatch])
    print("CSC chamber:")
    print(csc_pass_chamber[mismatch])
    print("")
    emtf_pass_endcap = emtf_endcap[llp_pass]
    emtf_pass_station = emtf_station[llp_pass]
    # Dump EMTF details for the mismatched events
    print("EMTF Masked Array:")
    print(emtf_pass[mismatch])
    print("EMTF endcap:")
    print(emtf_pass_endcap[mismatch])
    print("EMTF sector:")
    print(emtf_pass_station[mismatch])
    print("")
    print("GMT Masked Array:")
    print(gmt_pass[mismatch])
    print("")
    # Calculate efficiency for each stage relative to events in acceptance
    n_acc = np.count_nonzero(llp_pass)
    csc_eff = np.count_nonzero(ak.num(csc_pass, axis=1)) / n_acc * 100
    emtf_eff = np.count_nonzero(ak.num(emtf_pass, axis=1)) / n_acc * 100
    gmt_eff = np.count_nonzero(gmt_pass) / n_acc * 100
    return [n_acc, csc_eff, emtf_eff, gmt_eff]
def test_record_array_axis_out_of_range():
    # Record array whose fields have different depths: "x" is depth 2,
    # "y" is depth 3.
    array = awkward1.Array([
        {"x": [1], "y": [[], [1]]},
        {"x": [1, 2], "y": [[], [1], [1, 2]]},
        {"x": [1, 2, 3], "y": [[], [1], [1, 2], [1, 2, 3]]}])
    # Negative axes are resolved against the shallowest field, so axis=-2
    # already exceeds the minimum depth of 2.
    with pytest.raises(ValueError) as err:
        assert awkward1.num(array, axis=-2)
    assert str(err.value) == "axis == -2 exceeds the min depth == 2 of this array"
    with pytest.raises(ValueError) as err:
        assert awkward1.num(array, axis=-3)
    assert str(err.value) == "axis == -3 exceeds the depth == 2 of this array"
def test_array_3d():
    # Regular (rectangular) 3x5x2 numpy-backed array.
    array = awkward1.Array(numpy.arange(3 * 5 * 2).reshape(3, 5, 2))
    assert awkward1.to_list(array) == [[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], [[10, 11], [12, 13], [14, 15], [16, 17], [18, 19]], [[20, 21], [22, 23], [24, 25], [26, 27], [28, 29]]]
    # num counts the entries at the requested depth
    assert awkward1.num(array, axis=0) == 3
    assert awkward1.to_list(awkward1.num(array, axis=1)) == [5, 5, 5]
    assert awkward1.to_list(awkward1.num(array, axis=2)) == [[2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]]
    # axis beyond the depth of the array is an error
    with pytest.raises(ValueError) as err:
        assert awkward1.num(array, axis=3)
    assert str(err.value).startswith("'axis' out of range for 'num'")
    # negative axes count inward from the innermost dimension
    assert awkward1.to_list(awkward1.num(array, axis=-1)) == [[2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]]
    assert awkward1.to_list(awkward1.num(array, axis=-2)) == [5, 5, 5]
    assert awkward1.num(array, axis=-3) == 3
    with pytest.raises(ValueError) as err:
        assert awkward1.num(array, axis=-4)
    assert str(err.value).startswith(
        "axis == -4 exceeds the depth == 3 of this array")
def process(self, events):
    """Fill muon-pt and dimuon-mass histograms plus cutflow counters for one
    chunk of events."""
    out = self.accumulator.identity()
    dataset = events.metadata["dataset"]

    # Build all muon pairs per event and sum their four-vectors.
    pairs = ak.combinations(events.Muon, 2)
    dimuon = pairs["0"] + pairs["1"]

    out["pt"].fill(dataset=dataset, pt=ak.flatten(events.Muon.pt))
    out["mass"].fill(dataset=dataset, mass=ak.flatten(dimuon.mass))

    # Cutflow: total muons and total dimuon candidates seen for this dataset.
    out["cutflow"]["%s_pt" % dataset] += sum(ak.num(events.Muon))
    out["cutflow"]["%s_mass" % dataset] += sum(ak.num(dimuon))
    return out
def get(self, el, mu, meas='QCD'):
    """Return the per-event fake-rate weight for the given electrons and muons.

    Parameters
    ----------
    el, mu : ak.Array
        electron and muon collections (need conePt, etaSC / eta fields)
    meas : str
        which measurement to use: 'QCD', 'TT' or 'data'

    Raises
    ------
    ValueError
        if ``meas`` is not one of the supported measurements.
    """
    if meas == 'QCD':
        el_key, mu_key = 'el_QCD_NC', 'mu_QCD'
    elif meas == 'TT':
        el_key, mu_key = 'el_TT', 'mu_TT'
    elif meas == 'data':
        el_key, mu_key = 'el_data', 'mu_data'
    else:
        # Previously an unknown measurement fell through to a NameError on
        # el_key; fail fast with a clear message instead.
        raise ValueError(
            "Unknown measurement %r; expected 'QCD', 'TT' or 'data'" % (meas,))
    n_lep = ak.num(el) + ak.num(mu)
    # (-1)^(n+1): flips the sign of the weight with the lepton multiplicity
    sign = (-1)**(n_lep + 1)
    el_fr = self.evaluator[el_key](el.conePt, np.abs(el.etaSC))
    mu_fr = self.evaluator[mu_key](mu.conePt, np.abs(mu.eta))
    fr = ak.concatenate([el_fr, mu_fr], axis=1)
    return ak.prod(fr, axis=1) * sign
def select_protons(events, branchName="ProtCand"):
    """Attach event-level and leading-dimuon branches to the proton candidates
    in ``events[branchName]`` and keep only events with at least one proton.
    Field order matches the original assignment order."""
    protons_ = events[branchName]

    # Plain event-level branches, copied one-to-one.
    for field in ("Run", "LumiSection", "BX", "EventNum", "CrossingAngle"):
        protons_[field] = events[field]

    # Kinematics of the two leading muons.
    for i in (0, 1):
        protons_["Muon%dPt" % i] = events.MuonCand.pt[:, i]
        protons_["Muon%dEta" % i] = events.MuonCand.eta[:, i]
        protons_["Muon%dPhi" % i] = events.MuonCand.phi[:, i]
        protons_["Muon%dVtxZ" % i] = events.MuonCand.vtxz[:, i]

    protons_["PrimVertexZ"] = events.PrimVertexCand.z[:, 0]

    for field in ("InvMass", "nExtraPfCandPV3", "Acopl",
                  "XiMuMuPlus", "XiMuMuMinus"):
        protons_[field] = events[field]

    # Keep only events that have at least one proton candidate.
    has_proton = ak.num(protons_.xi) > 0
    return protons_[has_proton]
def test_read_nanomc():
    # Read a small Drell-Yan NanoAOD MC sample.
    factory = NanoEventsFactory.from_file(
        os.path.abspath('tests/samples/nano_dy.root'))
    events = factory.events()

    # test after views first: gen round-trips must survive masks and cuts
    genroundtrips(events.GenPart.mask[events.GenPart.eta > 0])
    genroundtrips(events.mask[ak.any(events.Electron.pt > 50, axis=1)].GenPart)
    genroundtrips(events.GenPart)
    genroundtrips(events.GenPart[events.GenPart.eta > 0])
    genroundtrips(events[ak.any(events.Electron.pt > 50, axis=1)].GenPart)

    # sane gen matching (note for electrons gen match may be photon(22))
    assert ak.all((abs(events.Electron.matched_gen.pdgId) == 11) | (events.Electron.matched_gen.pdgId == 22))
    assert ak.all(abs(events.Muon.matched_gen.pdgId) == 13)
    genroundtrips(events.Electron.matched_gen)

    # cross-references must hold both after an event-level cut and before it
    crossref(events[ak.num(events.Jet) > 2])
    crossref(events)

    assert ak.any(events.Photon.isTight, axis=1).tolist()[:9] == [False, True, True, True, False, False, False, False, False]
def process(self, events):
    """Select events with exactly two muons of zero net charge and fill the
    dimuon invariant-mass histogram."""
    output = self.accumulator.identity()
    dataset = events.metadata['dataset']
    muons = ak.zip(
        {
            "pt": events.Muon_pt,
            "eta": events.Muon_eta,
            "phi": events.Muon_phi,
            "mass": events.Muon_mass,
            "charge": events.Muon_charge,
        },
        with_name="PtEtaPhiMCandidate")
    # BUG FIX: ak.sum without an axis reduces over the WHOLE array to one
    # scalar total charge; the charge requirement must be evaluated per event
    # with axis=1.
    cut = (ak.num(muons) == 2) & (ak.sum(muons.charge, axis=1) == 0)
    # add first and second muon in every event together
    dimuon = muons[cut][:, 0] + muons[cut][:, 1]
    output["sumw"][dataset] += len(events)
    output["mass"].fill(
        dataset=dataset,
        mass=dimuon.mass,
    )
    return output
def select_protons(events):
    """Attach event-level and leading-dimuon branches to the proton candidates
    and keep only events with at least one proton. Field order matches the
    original assignment order."""
    # (multi-RP filtering intentionally left disabled, as before)
    protons_ = events.ProtCand

    protons_["CrossingAngle"] = events["CrossingAngle"]

    # Kinematics of the two leading muons.
    for i in (0, 1):
        protons_["Muon%dPt" % i] = events.MuonCand.pt[:, i]
        protons_["Muon%dEta" % i] = events.MuonCand.eta[:, i]
        protons_["Muon%dPhi" % i] = events.MuonCand.phi[:, i]

    for field in ("InvMass", "nExtraPfCandPV3", "Acopl",
                  "XiMuMuPlus", "XiMuMuMinus"):
        protons_[field] = events[field]

    # Keep only events that have at least one proton candidate.
    has_proton = ak.num(protons_.xi) > 0
    protons_ = protons_[has_proton]
    print(len(protons_))
    return protons_
def test_read_nanodata():
    # Read a small dimuon data sample and verify that cross-references hold
    # both on the full event set and after an event-level cut.
    factory = NanoEventsFactory.from_file(
        os.path.abspath('tests/samples/nano_dimuon.root'))
    events = factory.events()
    crossref(events)
    crossref(events[ak.num(events.Jet) > 2])
def process(self, events):
    """Select events with at least two loose electrons and fill the
    invariant-mass histogram of the leading opposite-sign dielectron pair."""
    # Initialize accumulator
    out = self.accumulator.identity()

    # Event selection: opposite charged same flavor
    Electron = events.Electron
    Electron_mask = (Electron.pt > 20) & (np.abs(Electron.eta) < 2.5) & (Electron.cutBased > 1)
    Ele_channel_mask = ak.num(Electron[Electron_mask]) > 1
    Ele_channel_events = events[Ele_channel_mask]
    Ele = Ele_channel_events.Electron

    # All possible pairs of Electron in each event
    ele_pairs = ak.combinations(Ele, 2, axis=1)

    # TLorentz vector sum of ele_pairs
    ele_left, ele_right = ak.unzip(ele_pairs)
    diele = ele_left + ele_right

    # Keep only opposite-sign pairs and pick the highest-pt one per event;
    # keepdims=True preserves the event structure for the indexing.
    diffsign_diele = diele[diele.charge == 0]
    leading_diffsign_diele = diffsign_diele[ak.argmax(diffsign_diele.pt, axis=1, keepdims=True)]

    #Mee = ak.flatten(leading_diffsign_diele.mass) # This makes type error ( primitive expected but ?float given )
    # Workaround: go through numpy and flatten there instead of ak.flatten.
    Mee = ak.to_numpy(leading_diffsign_diele.mass)
    Mee = Mee.flatten()

    out.fill(dataset=events.metadata["dataset"], mass=Mee)
    return out
def yahist_1D_lookup(h, ar):
    '''
    takes a yahist 1D histogram (which has a lookup function) and an awkward array.
    '''
    # Remember the jagged counts, look the flattened values up in the
    # histogram, then restore the original event structure.
    counts = ak.num(ar)
    flat_values = ak.to_numpy(ak.flatten(ar))
    looked_up = h.lookup(flat_values)
    return ak.unflatten(looked_up, counts)
def test_record_array():
    # Record array whose fields have different depths: "x" is depth 2,
    # "y" is depth 3.
    array = awkward1.Array([
        {"x": [1], "y": [[], [1]]},
        {"x": [1, 2], "y": [[], [1], [1, 2]]},
        {"x": [1, 2, 3], "y": [[], [1], [1, 2], [1, 2, 3]]}])
    # num reports counts per record field
    assert awkward1.num(array, axis=0).tolist() == {'x': 3, 'y': 3}
    assert awkward1.num(array, axis=1).tolist() == [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}, {'x': 3, 'y': 4}]
    # a positive axis beyond the shallowest field ("x") is an error
    with pytest.raises(ValueError) as err:
        assert awkward1.num(array, axis=2)
    assert str(err.value) == "'axis' out of range for 'num'"
    # axis=-1 resolves per field relative to each field's own depth
    assert awkward1.num(array, axis=-1).tolist() == [{'x': 1, 'y': [0, 1]}, {'x': 2, 'y': [0, 1, 2]}, {'x': 3, 'y': [0, 1, 2, 3]}]
def GetFromDict(tree, dictionairy, key):
    """Fetch a branch array from the cache, reading it from the tree on first
    use.

    A key with a numeric suffix like ``name_2`` refers to element index 1 of
    the jagged branch ``name``; entries with too few elements are masked out
    (become None).
    """
    branch = re.sub("_[0-9]", "", key)
    if branch not in dictionairy.keys():
        # first access: read the branch once and cache it
        dictionairy[branch] = tree[branch].array(library="ak")
    arr = dictionairy[branch]
    if not re.search("_[1-9]", key):
        return arr
    # "_N" suffix -> take the (N-1)-th element where the entry is long enough
    element = int(key[-1]) - 1
    return arr.mask[ak.num(arr) > element][:, element]
def matchJets(self, obj, jet, deltaRCut=0.4):
    """Match each object to jets within ``deltaRCut``.

    Returns
    -------
    jet_index_pad : ak.Array
        index of the first matched jet per object (0 when there is no match;
        use the masks to distinguish a real match at index 0 from "no match")
    mask_match : ak.Array
        1 where the object has at least one matched jet, else 0
    mask_nomatch : ak.Array
        complement of ``mask_match``
    """
    combs = ak.cartesian([obj, jet], nested=True)
    # Compute delta-R once instead of twice.
    dr = delta_r(combs['0'], combs['1'])
    # BUG FIX: the cut was hard-coded to 0.4, silently ignoring the
    # deltaRCut parameter; honour the parameter (default unchanged).
    jet_index = ak.local_index(dr)[dr < deltaRCut]
    jet_index_pad = ak.flatten(
        ak.fill_none(ak.pad_none(jet_index, target=1, clip=True, axis=2), 0),
        axis=2)
    mask = ak.num(jet_index, axis=2) > 0  # a mask for obj with a matched jet
    mask_match = mask * 1 + ~mask * 0
    mask_nomatch = mask * 0 + ~mask * 1
    return jet_index_pad, mask_match, mask_nomatch
def test_list_array():
    # Same 3x5x2 data as the regular-array test, but built from Python lists
    # so the layout is jagged rather than regular.
    array = awkward1.Array(numpy.arange(3*5*2).reshape(3, 5, 2).tolist())
    assert awkward1.num(array, axis=0) == 3
    assert awkward1.num(array, axis=1) == [5, 5, 5]
    assert awkward1.num(array, axis=2) == [[2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]]
    with pytest.raises(ValueError) as err:
        assert awkward1.num(array, axis=3)
    assert str(err.value) == "'axis' out of range for 'num'"
    # NOTE(review): the axis=-1 and axis=-2 expectations here look swapped
    # relative to the regular-array test (where axis=-1 matches axis=2) --
    # confirm against the awkward version under test.
    assert awkward1.num(array, axis=-1) == [5, 5, 5]
    assert awkward1.num(array, axis=-2) == [[2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]]
    assert awkward1.num(array, axis=-3) == 3
    with pytest.raises(ValueError) as err:
        assert awkward1.num(array, axis=-4)
    assert str(err.value) == "axis == -4 exceeds the depth == 3 of this array"
def errorbar_hist(P, var, P_name, title, low, high):
    """Draw an error-bar histogram of ``P[var]`` in [low, high] and save it as
    a PDF named after the particle and variable."""
    fig, ax = plt.subplots(figsize=(8, 8))

    # The candidate count drives the binning via the sqrt(N) rule.
    n_events = np.sum(ak.num(P))
    n_bins = int(np.sqrt(n_events))
    bin_width = (high - low) / n_bins

    values = ak.to_list(ak.flatten(P[var]))
    hist, edges = np.histogram(values, n_bins, range=(low, high))
    centres = (edges[:-1] + edges[1:]) / 2.
    errors = np.sqrt(hist)  # Poisson uncertainty per bin

    plt.errorbar(centres, hist, yerr=errors, fmt='o', color='k')
    plt.xlabel(title, fontsize=30)
    plt.ylabel("Candidates / (%.4f GeV/$c^2$)" % bin_width, fontsize=30)
    plt.xlim(low, high)
    ax.tick_params(axis='both', which='major', labelsize=25)

    # Leave 10% headroom above the tallest bin.
    _, y_top = plt.ylim()
    plt.ylim(0., y_top * 1.1)
    plt.tight_layout()
    plt.show()
    fig.savefig(f"{loc.PLOTS}/{P_name}_{var}.pdf")
def test_lazy_arrayset():
    """Round-trip an array through to_arrayset/from_arrayset in lazy mode and
    verify that only the buffers actually needed are fetched (Canary records
    the container accesses) and cached."""
    array = ak.from_json("""
    [
        {
            "listcollection": [
                {"item1": 1, "item2": 2},
                {"item1": 2, "item2": 4},
                {"item1": 3, "item2": 6}
            ],
            "collection": {"item1": 3, "item2": 4},
            "singleton": 5,
            "listsingleton": [1, 2, 3],
            "unioncollection": {"item1": 3},
            "masked": null
        },
        {
            "listcollection": [
                {"item1": 1, "item2": 2},
                {"item1": 2, "item2": 4},
                {"item1": 3, "item2": 6}
            ],
            "collection": {"item1": 3, "item2": 4},
            "singleton": 5,
            "listsingleton": [1, 2, 3],
            "unioncollection": [{"item1": 2}],
            "masked": 4
        },
        {
            "listcollection": [
                {"item1": 1, "item2": 2},
                {"item1": 2, "item2": 4},
                {"item1": 3, "item2": 6}
            ],
            "collection": {"item1": 3, "item2": 4},
            "singleton": 5,
            "listsingleton": [1, 2, 3],
            "unioncollection": {"item1": 4},
            "masked": 4
        }
    ]""")
    canary = Canary()
    prefix = "kitty"
    form, container, npart = ak.to_arrayset(array, container=canary, prefix=prefix)
    # writing the arrayset must not trigger any reads
    assert not any(op[0] == "get" for op in canary.ops)
    canary.ops = []

    cache = {}
    out = ak.from_arrayset(form, container, lazy=True, lazy_cache=cache, lazy_lengths=3, prefix=prefix, lazy_cache_key="hello")
    # taking the length alone must not touch the container or the cache
    assert len(canary.ops) == 0
    assert len(cache) == 0
    assert len(out) == 3
    assert len(canary.ops) == 0
    assert len(cache) == 0

    # num only needs the offsets buffer of the list node
    assert ak.to_list(ak.num(out.listcollection)) == [3, 3, 3]
    assert set(canary.ops) == {('get', 'kitty-node1-offsets')}
    assert set(cache) == {'hello', 'hello-kitty-node1-virtual'}
    canary.ops = []
    cache.clear()

    # materializing a union field pulls tags, index, and both contents
    assert ak.to_list(out.unioncollection) == [{'item1': 3}, [{'item1': 2}], {'item1': 4}]
    assert set(canary.ops) == {('get', 'kitty-node11-tags'), ('get', 'kitty-node11-index'), ('get', 'kitty-node14-offsets'), ('get', 'kitty-node13'), ('get', 'kitty-node16')}
    assert set(cache) == {'hello', 'hello-kitty-node11-virtual', 'hello-kitty-node13-virtual', 'hello-kitty-node16-virtual'}
    canary.ops = []
    cache.clear()

    # materializing an option-type field pulls its index and content
    assert ak.to_list(out.masked) == [None, 4, 4]
    assert set(canary.ops) == {('get', 'kitty-node17-index'), ('get', 'kitty-node18')}
    assert set(cache) == {'hello', 'hello-kitty-node17-virtual'}
    canary.ops = []
    cache.clear()
def getN(var_, i):
    """Return element ``i`` of each sublist; entries with fewer than ``i + 1``
    elements come back masked (None)."""
    long_enough = ak.num(var_, axis=1) > i
    masked = ak.mask(var_, long_enough, highlevel=False)
    return masked[:, i]
# Repeat events by resample factor if resample_: events_sel_ = ak.concatenate( ( [events_sel_] * resample_factor_ ), axis=0 ) # Randomize proton arrays if random_protons_: protons_sel_ = events_sel_.ProtCand index_rnd_ = np.random.permutation( len( events_sel_ ) ) protons_rnd_ = protons_sel_[ index_rnd_ ] events_sel_[ "ProtCandRnd" ] = protons_rnd_ print ( ak.num( events_sel_.ProtCand ) ) print ( ak.num( events_sel_.ProtCandRnd ) ) #protons_ = select_protons( events_sel_ ) protons_ = None if not random_protons_: protons_ = select_protons( events_sel_, "ProtCand" ) else: protons_ = select_protons( events_sel_, "ProtCandRnd" ) counts_protons_ = len( protons_ ) if not counts_label_protons_ in selections: selections = np.concatenate( ( selections, np.array( [ counts_label_protons_ ] ) ) ) counts = np.concatenate( ( counts, np.array( [counts_protons_] ) ) ) else: counts[ selections == counts_label_protons_ ] += counts_protons_ print ( selections )
def process(self, events):
    """Same-sign dilepton analysis processor: applies a loose preselection,
    builds lepton/jet/MET objects, evaluates the ONNX NN, and fills the
    output histograms.

    NOTE(review): the indentation structure below was reconstructed from the
    later uses of the guarded variables (gen leptons and n_nonprompt /
    n_chargeflip are only used inside matching data-vs-MC guards) -- confirm
    against the original file.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet)>2

    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        ## Generated leptons (MC only)
        gen_lep = ev.GenL
        leading_gen_lep = gen_lep[ak.singletons(ak.argmax(gen_lep.pt, axis=1))]
        trailing_gen_lep = gen_lep[ak.singletons(ak.argmin(gen_lep.pt, axis=1))]

    ## Muons
    muon = Collections(ev, "Muon", "tightSSTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge)>0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightSSTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge)>0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge)>0, axis=1)

    lepton = ak.concatenate([muon, electron], axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]

    dilepton_mass = (leading_lepton+trailing_lepton).mass
    dilepton_pt = (leading_lepton+trailing_lepton).pt
    dilepton_dR = delta_r(leading_lepton, trailing_lepton)

    # pdgIds of the two leading leptons, zero-padded to exactly two entries
    lepton_pdgId_pt_ordered = ak.fill_none(ak.pad_none(lepton[ak.argsort(lepton.pt, ascending=False)].pdgId, 2, clip=True), 0)

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # MC-only truth counters
        n_nonprompt = getNonPromptFromFlavour(electron) + getNonPromptFromFlavour(muon)
        n_chargeflip = getChargeFlips(electron, ev.GenPart) + getChargeFlips(muon, ev.GenPart)

    mt_lep_met = mt(lepton.pt, lepton.phi, ev.MET.pt, ev.MET.phi)
    min_mt_lep_met = ak.min(mt_lep_met, axis=1)

    ## Tau and other stuff
    tau = getTaus(ev)
    track = getIsoTracks(ev)

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)] # need to sort wrt smeared and recorrected jet pt
    jet = jet[~match(jet, muon, deltaRCut=0.4)] # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)] # remove jets that overlap with electrons

    central = jet[(abs(jet.eta)<2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year) # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    # two jets with the highest b-tag score
    high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:,:2]

    bl = cross(lepton, high_score_btag)
    bl_dR = delta_r(bl['0'], bl['1'])
    min_bl_dR = ak.min(bl_dR, axis=1)

    ## forward jets
    j_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))] # highest momentum spectator

    jf = cross(j_fwd, jet)
    mjf = (jf['0']+jf['1']).mass
    j_fwd2 = jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'] # this is the jet that forms the largest invariant mass with j_fwd
    delta_eta = abs(j_fwd2.eta - j_fwd.eta)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)

    # define the weight
    weight = Weights( len(ev) )

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # lumi weight
        weight.add("weight", ev.weight*cfg['lumi'][self.year])
        #weight.add("weight", ev.genWeight*cfg['lumi'][self.year]*mult)

        # PU weight - not in the babies...
        weight.add("PU", ev.puWeight, weightUp=ev.puWeightUp, weightDown=ev.puWeightDown, shift=False)

        # b-tag SFs
        weight.add("btag", self.btagSF.Method1a(btag, light))

        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    cutflow = Cutflow(output, ev, weight=weight)

    sel = Selection(
        dataset = dataset,
        events = ev,
        year = self.year,
        ele = electron,
        ele_veto = vetoelectron,
        mu = muon,
        mu_veto = vetomuon,
        jet_all = jet,
        jet_central = central,
        jet_btag = btag,
        jet_fwd = fwd,
        met = ev.MET,
    )

    BL = sel.dilep_baseline(cutflow=cutflow, SS=True)
    weight_BL = weight.weight()[BL]

    if True:
        # define the inputs to the NN
        # this is super stupid. there must be a better way.
        NN_inputs = np.stack([
            ak.to_numpy(ak.num(jet[BL])),
            ak.to_numpy(ak.num(tau[BL])),
            ak.to_numpy(ak.num(track[BL])),
            ak.to_numpy(st[BL]),
            ak.to_numpy(ev.MET[BL].pt),
            ak.to_numpy(ak.max(mjf[BL], axis=1)),
            ak.to_numpy(pad_and_flatten(delta_eta[BL])),
            ak.to_numpy(pad_and_flatten(leading_lepton[BL].pt)),
            ak.to_numpy(pad_and_flatten(leading_lepton[BL].eta)),
            ak.to_numpy(pad_and_flatten(trailing_lepton[BL].pt)),
            ak.to_numpy(pad_and_flatten(trailing_lepton[BL].eta)),
            ak.to_numpy(pad_and_flatten(dilepton_mass[BL])),
            ak.to_numpy(pad_and_flatten(dilepton_pt[BL])),
            ak.to_numpy(pad_and_flatten(j_fwd[BL].pt)),
            ak.to_numpy(pad_and_flatten(j_fwd[BL].p)),
            ak.to_numpy(pad_and_flatten(j_fwd[BL].eta)),
            ak.to_numpy(pad_and_flatten(jet[:, 0:1][BL].pt)),
            ak.to_numpy(pad_and_flatten(jet[:, 1:2][BL].pt)),
            ak.to_numpy(pad_and_flatten(jet[:, 0:1][BL].eta)),
            ak.to_numpy(pad_and_flatten(jet[:, 1:2][BL].eta)),
            ak.to_numpy(pad_and_flatten(high_score_btag[:, 0:1][BL].pt)),
            ak.to_numpy(pad_and_flatten(high_score_btag[:, 1:2][BL].pt)),
            ak.to_numpy(pad_and_flatten(high_score_btag[:, 0:1][BL].eta)),
            ak.to_numpy(pad_and_flatten(high_score_btag[:, 1:2][BL].eta)),
            ak.to_numpy(min_bl_dR[BL]),
            ak.to_numpy(min_mt_lep_met[BL]),
        ])

        # reshape from (features, events) to (events, features)
        NN_inputs = np.moveaxis(NN_inputs, 0, 1)

        model, scaler = load_onnx_model('v8')

        try:
            NN_inputs_scaled = scaler.transform(NN_inputs)

            NN_pred = predict_onnx(model, NN_inputs_scaled)

            best_score = np.argmax(NN_pred, axis=1)

        except ValueError:
            # scaler/model reject empty input; fall back to empty predictions
            #print ("Empty NN_inputs")
            NN_pred = np.array([])
            best_score = np.array([])
            NN_inputs_scaled = NN_inputs

        #k.clear_session()

        output['node'].fill(dataset=dataset, multiplicity=best_score, weight=weight_BL)

        # per-node score histograms; guard against an empty prediction array
        output['node0_score_incl'].fill(dataset=dataset, score=NN_pred[:,0] if np.shape(NN_pred)[0]>0 else np.array([]), weight=weight_BL)
        output['node0_score'].fill(dataset=dataset, score=NN_pred[best_score==0][:,0] if np.shape(NN_pred)[0]>0 else np.array([]), weight=weight_BL[best_score==0])
        output['node1_score'].fill(dataset=dataset, score=NN_pred[best_score==1][:,1] if np.shape(NN_pred)[0]>0 else np.array([]), weight=weight_BL[best_score==1])
        output['node2_score'].fill(dataset=dataset, score=NN_pred[best_score==2][:,2] if np.shape(NN_pred)[0]>0 else np.array([]), weight=weight_BL[best_score==2])
        output['node3_score'].fill(dataset=dataset, score=NN_pred[best_score==3][:,3] if np.shape(NN_pred)[0]>0 else np.array([]), weight=weight_BL[best_score==3])
        output['node4_score'].fill(dataset=dataset, score=NN_pred[best_score==4][:,4] if np.shape(NN_pred)[0]>0 else np.array([]), weight=weight_BL[best_score==4])

        # signal-region split of node-0 events by leading-lepton charge sign
        SR_sel_pp = ((best_score==0) & ak.flatten((leading_lepton[BL].pdgId<0)))
        SR_sel_mm = ((best_score==0) & ak.flatten((leading_lepton[BL].pdgId>0)))
        leading_lepton_BL = leading_lepton[BL]

        output['lead_lep_SR_pp'].fill(
            dataset = dataset,
            pt = ak.to_numpy(ak.flatten(leading_lepton_BL[SR_sel_pp].pt)),
            weight = weight_BL[SR_sel_pp]
        )

        output['lead_lep_SR_mm'].fill(
            dataset = dataset,
            pt = ak.to_numpy(ak.flatten(leading_lepton_BL[SR_sel_mm].pt)),
            weight = weight_BL[SR_sel_mm]
        )

        del model
        del scaler
        del NN_inputs, NN_inputs_scaled, NN_pred

    # first, make a few super inclusive plots
    output['PV_npvs'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvs, weight=weight_BL)
    output['PV_npvsGood'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvsGood, weight=weight_BL)
    output['N_jet'].fill(dataset=dataset, multiplicity=ak.num(jet)[BL], weight=weight_BL)
    output['N_b'].fill(dataset=dataset, multiplicity=ak.num(btag)[BL], weight=weight_BL)
    output['N_central'].fill(dataset=dataset, multiplicity=ak.num(central)[BL], weight=weight_BL)
    output['N_ele'].fill(dataset=dataset, multiplicity=ak.num(electron)[BL], weight=weight_BL)
    # NOTE(review): N_mu is filled with the ELECTRON count -- likely a
    # copy-paste bug; confirm whether ak.num(muon)[BL] was intended.
    output['N_mu'].fill(dataset=dataset, multiplicity=ak.num(electron)[BL], weight=weight_BL)
    output['N_fwd'].fill(dataset=dataset, multiplicity=ak.num(fwd)[BL], weight=weight_BL)
    output['ST'].fill(dataset=dataset, pt=st[BL], weight=weight_BL)
    output['HT'].fill(dataset=dataset, pt=ht[BL], weight=weight_BL)

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # MC-only truth histograms
        output['nLepFromTop'].fill(dataset=dataset, multiplicity=ev[BL].nLepFromTop, weight=weight_BL)
        output['nLepFromTau'].fill(dataset=dataset, multiplicity=ev.nLepFromTau[BL], weight=weight_BL)
        output['nLepFromZ'].fill(dataset=dataset, multiplicity=ev.nLepFromZ[BL], weight=weight_BL)
        output['nLepFromW'].fill(dataset=dataset, multiplicity=ev.nLepFromW[BL], weight=weight_BL)
        output['nGenTau'].fill(dataset=dataset, multiplicity=ev.nGenTau[BL], weight=weight_BL)
        output['nGenL'].fill(dataset=dataset, multiplicity=ak.num(ev.GenL[BL], axis=1), weight=weight_BL)
        output['chargeFlip_vs_nonprompt'].fill(dataset=dataset, n1=n_chargeflip[BL], n2=n_nonprompt[BL], n_ele=ak.num(electron)[BL], weight=weight_BL)

    output['MET'].fill(
        dataset = dataset,
        pt = ev.MET[BL].pt,
        phi = ev.MET[BL].phi,
        weight = weight_BL
    )

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        output['lead_gen_lep'].fill(
            dataset = dataset,
            pt = ak.to_numpy(ak.flatten(leading_gen_lep[BL].pt)),
            eta = ak.to_numpy(ak.flatten(leading_gen_lep[BL].eta)),
            phi = ak.to_numpy(ak.flatten(leading_gen_lep[BL].phi)),
            weight = weight_BL
        )

        output['trail_gen_lep'].fill(
            dataset = dataset,
            pt = ak.to_numpy(ak.flatten(trailing_gen_lep[BL].pt)),
            eta = ak.to_numpy(ak.flatten(trailing_gen_lep[BL].eta)),
            phi = ak.to_numpy(ak.flatten(trailing_gen_lep[BL].phi)),
            weight = weight_BL
        )

    output['lead_lep'].fill(
        dataset = dataset,
        pt = ak.to_numpy(ak.flatten(leading_lepton[BL].pt)),
        eta = ak.to_numpy(ak.flatten(leading_lepton[BL].eta)),
        phi = ak.to_numpy(ak.flatten(leading_lepton[BL].phi)),
        weight = weight_BL
    )

    output['trail_lep'].fill(
        dataset = dataset,
        pt = ak.to_numpy(ak.flatten(trailing_lepton[BL].pt)),
        eta = ak.to_numpy(ak.flatten(trailing_lepton[BL].eta)),
        phi = ak.to_numpy(ak.flatten(trailing_lepton[BL].phi)),
        weight = weight_BL
    )

    output['j1'].fill(
        dataset = dataset,
        pt = ak.flatten(jet.pt_nom[:, 0:1][BL]),
        eta = ak.flatten(jet.eta[:, 0:1][BL]),
        phi = ak.flatten(jet.phi[:, 0:1][BL]),
        weight = weight_BL
    )

    output['j2'].fill(
        dataset = dataset,
        pt = ak.flatten(jet[:, 1:2][BL].pt_nom),
        eta = ak.flatten(jet[:, 1:2][BL].eta),
        phi = ak.flatten(jet[:, 1:2][BL].phi),
        weight = weight_BL
    )

    output['j3'].fill(
        dataset = dataset,
        pt = ak.flatten(jet[:, 2:3][BL].pt_nom),
        eta = ak.flatten(jet[:, 2:3][BL].eta),
        phi = ak.flatten(jet[:, 2:3][BL].phi),
        weight = weight_BL
    )

    output['fwd_jet'].fill(
        dataset = dataset,
        pt = ak.flatten(j_fwd[BL].pt),
        eta = ak.flatten(j_fwd[BL].eta),
        phi = ak.flatten(j_fwd[BL].phi),
        weight = weight_BL
    )

    output['high_p_fwd_p'].fill(dataset=dataset, p = ak.flatten(j_fwd[BL].p), weight = weight_BL)

    return output
def process(self, events):
    """Process one chunk of NanoAOD events for the OS (e-mu) selection.

    Builds lepton/jet collections, applies an opposite-sign dilepton
    baseline via a ``PackedSelection``, fills the nominal histograms, and
    (for MC) refills the jet-dependent histograms for every jet-energy
    variation in ``self.variations``.

    Fixes relative to the previous revision:
      * ``N_mu`` was filled with the *electron* multiplicity (copy-paste bug);
        it now uses ``ak.num(muon)``.
      * ``high_score_btag`` sorted the b-tag score ascending (awkward's
        default), i.e. picked the two LOWEST-score jets; it now sorts
        descending to match the "two leading b-jets" intent and the
        ``b1``/``b2`` histograms.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet) > 2
    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    ## Muons
    muon = Collections(ev, "Muon", "tightTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) > 0, axis=1)
    OSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) < 0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) > 0, axis=1)
    OSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) < 0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    lepton = ak.concatenate([muon, electron], axis=1)
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) > 0, axis=1)
    OSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) < 0, axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    # need to sort wrt smeared and recorrected jet pt
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)]
    jet = jet[~match(jet, muon, deltaRCut=0.4)]      # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta) < 2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    ## forward jets
    high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]            # highest momentum spectator
    high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt_nom, axis=1))]      # highest transverse momentum spectator
    high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]   # most forward spectator

    ## Get the two leading b-jets in terms of btag score
    # FIX: ak.argsort is ascending by default, which previously selected the
    # two lowest-score jets; sort descending to take the leading scores.
    high_score_btag = central[ak.argsort(central.btagDeepFlavB, ascending=False)][:, :2]

    jf = cross(high_p_fwd, jet)
    mjf = (jf['0'] + jf['1']).mass
    deltaEta = abs(high_p_fwd.eta - jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'].eta)
    deltaEtaMax = ak.max(deltaEta, axis=1)
    mjf_max = ak.max(mjf, axis=1)

    jj = choose(jet, 2)
    mjj_max = ak.max((jj['0'] + jj['1']).mass, axis=1)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    ht_central = ak.sum(central.pt, axis=1)

    ## event selectors
    filters = getFilters(ev, year=self.year, dataset=dataset)
    triggers = getTriggers(ev, year=self.year, dataset=dataset)
    dilep = ((ak.num(electron) == 1) & (ak.num(muon) == 1))
    lep0pt = ((ak.num(electron[(electron.pt > 25)]) + ak.num(muon[(muon.pt > 25)])) > 0)
    lep1pt = ((ak.num(electron[(electron.pt > 20)]) + ak.num(muon[(muon.pt > 20)])) > 1)
    lepveto = ((ak.num(vetoelectron) + ak.num(vetomuon)) == 2)

    # define the weight (left at unity for data, i.e. the MuonEG dataset)
    weight = Weights(len(ev))
    if not dataset == 'MuonEG':
        # lumi weight
        weight.add("weight", ev.weight * cfg['lumi'][self.year])
        # PU weight - not in the babies...
        weight.add("PU",
                   ev.puWeight,
                   weightUp=ev.puWeightUp,
                   weightDown=ev.puWeightDown,
                   shift=False)
        # b-tag SFs
        weight.add("btag", self.btagSF.Method1a(btag, light))
        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    selection = PackedSelection()
    selection.add('lepveto', lepveto)
    selection.add('dilep', dilep)
    selection.add('trigger', (triggers))
    selection.add('filter', (filters))
    selection.add('p_T(lep0)>25', lep0pt)
    selection.add('p_T(lep1)>20', lep1pt)
    selection.add('OS', OSlepton)
    selection.add('N_btag=2', (ak.num(btag) == 2))
    selection.add('N_jet>2', (ak.num(jet) >= 3))
    selection.add('N_central>1', (ak.num(central) >= 2))
    selection.add('N_fwd>0', (ak.num(fwd) >= 1))
    selection.add('MET>30', (ev.MET.pt > 30))

    # OS baseline, and the full baseline (OS + jet/b-tag/MET requirements)
    os_reqs = ['lepveto', 'dilep', 'trigger', 'filter',
               'p_T(lep0)>25', 'p_T(lep1)>20', 'OS']
    bl_reqs = os_reqs + ['N_btag=2', 'N_jet>2', 'N_central>1', 'N_fwd>0', 'MET>30']

    os_reqs_d = {sel: True for sel in os_reqs}
    os_selection = selection.require(**os_reqs_d)
    bl_reqs_d = {sel: True for sel in bl_reqs}
    BL = selection.require(**bl_reqs_d)

    # cutflow accumulates yields cut-by-cut in baseline order
    cutflow = Cutflow(output, ev, weight=weight)
    cutflow_reqs_d = {}
    for req in bl_reqs:
        cutflow_reqs_d.update({req: True})
        cutflow.addRow(req, selection.require(**cutflow_reqs_d))

    # first, make a few super inclusive plots
    output['PV_npvs'].fill(dataset=dataset,
                           multiplicity=ev.PV[os_selection].npvs,
                           weight=weight.weight()[os_selection])
    output['PV_npvsGood'].fill(dataset=dataset,
                               multiplicity=ev.PV[os_selection].npvsGood,
                               weight=weight.weight()[os_selection])
    output['N_jet'].fill(dataset=dataset,
                         multiplicity=ak.num(jet)[os_selection],
                         weight=weight.weight()[os_selection])
    output['N_b'].fill(dataset=dataset,
                       multiplicity=ak.num(btag)[os_selection],
                       weight=weight.weight()[os_selection])
    output['N_central'].fill(dataset=dataset,
                             multiplicity=ak.num(central)[os_selection],
                             weight=weight.weight()[os_selection])
    output['N_ele'].fill(dataset=dataset,
                         multiplicity=ak.num(electron)[os_selection],
                         weight=weight.weight()[os_selection])
    # FIX: previously filled with ak.num(electron) (copy-paste bug)
    output['N_mu'].fill(dataset=dataset,
                        multiplicity=ak.num(muon)[os_selection],
                        weight=weight.weight()[os_selection])
    output['N_fwd'].fill(dataset=dataset,
                         multiplicity=ak.num(fwd)[os_selection],
                         weight=weight.weight()[os_selection])
    output['MET'].fill(dataset=dataset,
                       pt=ev.MET[os_selection].pt,
                       phi=ev.MET[os_selection].phi,
                       weight=weight.weight()[os_selection])
    output['electron'].fill(dataset=dataset,
                            pt=ak.to_numpy(ak.flatten(electron[BL].pt)),
                            eta=ak.to_numpy(ak.flatten(electron[BL].eta)),
                            phi=ak.to_numpy(ak.flatten(electron[BL].phi)),
                            weight=weight.weight()[BL])
    output['muon'].fill(dataset=dataset,
                        pt=ak.to_numpy(ak.flatten(muon[BL].pt)),
                        eta=ak.to_numpy(ak.flatten(muon[BL].eta)),
                        phi=ak.to_numpy(ak.flatten(muon[BL].phi)),
                        weight=weight.weight()[BL])
    output['lead_lep'].fill(dataset=dataset,
                            pt=ak.to_numpy(ak.flatten(leading_lepton[BL].pt)),
                            eta=ak.to_numpy(ak.flatten(leading_lepton[BL].eta)),
                            phi=ak.to_numpy(ak.flatten(leading_lepton[BL].phi)),
                            weight=weight.weight()[BL])
    output['trail_lep'].fill(dataset=dataset,
                             pt=ak.to_numpy(ak.flatten(trailing_lepton[BL].pt)),
                             eta=ak.to_numpy(ak.flatten(trailing_lepton[BL].eta)),
                             phi=ak.to_numpy(ak.flatten(trailing_lepton[BL].phi)),
                             weight=weight.weight()[BL])
    output['fwd_jet'].fill(dataset=dataset,
                           pt=ak.flatten(high_p_fwd[BL].pt_nom),
                           eta=ak.flatten(high_p_fwd[BL].eta),
                           phi=ak.flatten(high_p_fwd[BL].phi),
                           weight=weight.weight()[BL])
    output['b1'].fill(dataset=dataset,
                      pt=ak.flatten(high_score_btag[:, 0:1][BL].pt_nom),
                      eta=ak.flatten(high_score_btag[:, 0:1][BL].eta),
                      phi=ak.flatten(high_score_btag[:, 0:1][BL].phi),
                      weight=weight.weight()[BL])
    output['b2'].fill(dataset=dataset,
                      pt=ak.flatten(high_score_btag[:, 1:2][BL].pt_nom),
                      eta=ak.flatten(high_score_btag[:, 1:2][BL].eta),
                      phi=ak.flatten(high_score_btag[:, 1:2][BL].phi),
                      weight=weight.weight()[BL])
    output['j1'].fill(dataset=dataset,
                      pt=ak.flatten(jet.pt_nom[:, 0:1][BL]),
                      eta=ak.flatten(jet.eta[:, 0:1][BL]),
                      phi=ak.flatten(jet.phi[:, 0:1][BL]),
                      weight=weight.weight()[BL])
    output['j2'].fill(dataset=dataset,
                      pt=ak.flatten(jet[:, 1:2][BL].pt_nom),
                      eta=ak.flatten(jet[:, 1:2][BL].eta),
                      phi=ak.flatten(jet[:, 1:2][BL].phi),
                      weight=weight.weight()[BL])
    output['j3'].fill(dataset=dataset,
                      pt=ak.flatten(jet[:, 2:3][BL].pt_nom),
                      eta=ak.flatten(jet[:, 2:3][BL].eta),
                      phi=ak.flatten(jet[:, 2:3][BL].phi),
                      weight=weight.weight()[BL])

    # Now, take care of systematic unceratinties (MC only)
    if not dataset == 'MuonEG':
        alljets = getJets(ev, minPt=0, maxEta=4.7)
        alljets = alljets[(alljets.jetId > 1)]
        for var in self.variations:
            # get the collections that change with the variations
            jet = getPtEtaPhi(alljets, pt_var=var)
            jet = jet[(jet.pt > 25)]
            jet = jet[~match(jet, muon, deltaRCut=0.4)]      # remove jets that overlap with muons
            jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons
            central = jet[(abs(jet.eta) < 2.4)]
            btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
            light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
            fwd = getFwdJet(light)
            fwd_noPU = getFwdJet(light, puId=False)

            ## forward jets
            high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]           # highest momentum spectator
            high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt, axis=1))]         # highest transverse momentum spectator
            high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]  # most forward spectator

            ## Get the two leading b-jets in terms of btag score (descending, see above)
            high_score_btag = central[ak.argsort(central.btagDeepFlavB, ascending=False)][:, :2]

            # get the modified selection -> more difficult
            selection.add('N_jet>2_' + var, (ak.num(jet.pt) >= 3))  # stupid bug here...
            selection.add('N_btag=2_' + var, (ak.num(btag) == 2))
            selection.add('N_central>1_' + var, (ak.num(central) >= 2))
            selection.add('N_fwd>0_' + var, (ak.num(fwd) >= 1))
            selection.add('MET>30_' + var, (getattr(ev.MET, var) > 30))

            ## Don't change the selection for now...
            bl_reqs = os_reqs + ['N_jet>2_' + var, 'MET>30_' + var,
                                 'N_btag=2_' + var, 'N_central>1_' + var,
                                 'N_fwd>0_' + var]
            bl_reqs_d = {sel: True for sel in bl_reqs}
            BL = selection.require(**bl_reqs_d)

            # the OS selection remains unchanged
            output['N_jet_' + var].fill(dataset=dataset,
                                        multiplicity=ak.num(jet)[os_selection],
                                        weight=weight.weight()[os_selection])
            output['N_fwd_' + var].fill(dataset=dataset,
                                        multiplicity=ak.num(fwd)[os_selection],
                                        weight=weight.weight()[os_selection])
            output['N_b_' + var].fill(dataset=dataset,
                                      multiplicity=ak.num(btag)[os_selection],
                                      weight=weight.weight()[os_selection])
            output['N_central_' + var].fill(dataset=dataset,
                                            multiplicity=ak.num(central)[os_selection],
                                            weight=weight.weight()[os_selection])

            # We don't need to redo all plots with variations. E.g., just add uncertainties to the jet plots.
            output['j1_' + var].fill(dataset=dataset,
                                     pt=ak.flatten(jet.pt[:, 0:1][BL]),
                                     eta=ak.flatten(jet.eta[:, 0:1][BL]),
                                     phi=ak.flatten(jet.phi[:, 0:1][BL]),
                                     weight=weight.weight()[BL])
            output['b1_' + var].fill(dataset=dataset,
                                     pt=ak.flatten(high_score_btag[:, 0:1].pt[:, 0:1][BL]),
                                     eta=ak.flatten(high_score_btag[:, 0:1].eta[:, 0:1][BL]),
                                     phi=ak.flatten(high_score_btag[:, 0:1].phi[:, 0:1][BL]),
                                     weight=weight.weight()[BL])
            output['fwd_jet_' + var].fill(dataset=dataset,
                                          pt=ak.flatten(high_p_fwd[BL].pt),
                                          eta=ak.flatten(high_p_fwd[BL].eta),
                                          phi=ak.flatten(high_p_fwd[BL].phi),
                                          weight=weight.weight()[BL])
            output['MET_' + var].fill(dataset=dataset,
                                      pt=getattr(ev.MET, var)[os_selection],
                                      phi=ev.MET[os_selection].phi,
                                      weight=weight.weight()[os_selection])

    return output
def process(self, events):
    """Process one chunk of NanoAOD events for the b-tag SF systematics study.

    Applies the shared ``Selection`` OS-dilepton baseline and fills only the
    b-tag multiplicity histogram ``N_b``; for simulation it refills
    ``N_b_<var>`` with the b-tag scale factor shifted per variation
    (centralUp / centralDown / upCentral / downCentral).  For data it stores
    run/lumi/event numbers of selected events instead.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet) > 2
    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    ## Muons
    muon = Collections(ev, "Muon", "tightSSTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) > 0, axis=1)
    OSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) < 0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightSSTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) > 0, axis=1)
    OSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) < 0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    lepton = ak.concatenate([muon, electron], axis=1)
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) > 0, axis=1)
    OSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) < 0, axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]
    # NOTE(review): '&' of two singleton *index* arrays looks suspicious as a
    # way to select the "middle" lepton — confirm intent before relying on it.
    second_lepton = lepton[~(trailing_lepton_idx & leading_lepton_idx)]

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    # need to sort wrt smeared and recorrected jet pt
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)]
    jet = jet[~match(jet, muon, deltaRCut=0.4)]      # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta) < 2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    ## forward jets
    high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]            # highest momentum spectator
    high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt_nom, axis=1))]      # highest transverse momentum spectator
    high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]   # most forward spectator

    ## Get the two leading b-jets in terms of btag score
    # NOTE(review): ak.argsort is ascending by default, so this takes the two
    # *lowest*-score central jets — confirm whether ascending=False was meant.
    high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:, :2]

    jf = cross(high_p_fwd, jet)
    mjf = (jf['0'] + jf['1']).mass
    deltaEta = abs(high_p_fwd.eta - jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'].eta)
    deltaEtaMax = ak.max(deltaEta, axis=1)
    mjf_max = ak.max(mjf, axis=1)

    jj = choose(jet, 2)
    mjj_max = ak.max((jj['0'] + jj['1']).mass, axis=1)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    lt = met_pt + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    ht_central = ak.sum(central.pt, axis=1)

    # define the weight (unity for data datasets matched by the regex)
    weight = Weights(len(ev))
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # lumi weight
        weight.add("weight", ev.weight * cfg['lumi'][self.year])
        # PU weight - not in the babies...
        weight.add("PU",
                   ev.puWeight,
                   weightUp=ev.puWeightUp,
                   weightDown=ev.puWeightDown,
                   shift=False)
        # b-tag SFs (both directions central for the nominal fill)
        weight.add(
            "btag",
            self.btagSF.Method1a(btag,
                                 light,
                                 b_direction='central',
                                 c_direction='central'))
        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    # shared baseline-selection helper; holds all object collections
    sel = Selection(
        dataset=dataset,
        events=ev,
        year=self.year,
        ele=electron,
        ele_veto=vetoelectron,
        mu=muon,
        mu_veto=vetomuon,
        jet_all=jet,
        jet_central=central,
        jet_btag=btag,
        jet_fwd=fwd,
        met=ev.MET,
    )

    BL = sel.dilep_baseline(SS=False)
    # N-1 selection: baseline without the b-tag requirement, for the N_b plot
    BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])

    output['N_b'].fill(dataset=dataset,
                       multiplicity=ak.num(btag)[BL_minusNb],
                       weight=weight.weight()[BL_minusNb])

    if re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # data: store event identifiers of selected events for later cross-checks
        #rle = ak.to_numpy(ak.zip([ev.run, ev.luminosityBlock, ev.event]))
        run_ = ak.to_numpy(ev.run)
        lumi_ = ak.to_numpy(ev.luminosityBlock)
        event_ = ak.to_numpy(ev.event)
        output['%s_run' % dataset] += processor.column_accumulator(run_[BL])
        output['%s_lumi' % dataset] += processor.column_accumulator(lumi_[BL])
        output['%s_event' % dataset] += processor.column_accumulator(event_[BL])

    # Now, take care of systematic unceratinties
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        alljets = getJets(ev, minPt=0, maxEta=4.7)
        alljets = alljets[(alljets.jetId > 1)]
        for var in self.variations:
            # get the collections that change with the variations
            # (jets are NOT re-derived here: only the b-tag SF direction varies)
            btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet

            # rebuild the weight from scratch, swapping in the shifted b-tag SF
            weight = Weights(len(ev))
            weight.add("weight", ev.weight * cfg['lumi'][self.year])
            weight.add("PU",
                       ev.puWeight,
                       weightUp=ev.puWeightUp,
                       weightDown=ev.puWeightDown,
                       shift=False)
            # NOTE(review): a variation name outside these four gets no btag
            # weight at all — confirm self.variations is restricted to them.
            if var == 'centralUp':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='central',
                                         c_direction='up'))
            elif var == 'centralDown':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='central',
                                         c_direction='down'))
            elif var == 'upCentral':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='up',
                                         c_direction='central'))
            elif var == 'downCentral':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='down',
                                         c_direction='central'))

            weight.add("lepton", self.leptonSF.get(electron, muon))

            met = ev.MET
            sel = Selection(
                dataset=dataset,
                events=ev,
                year=self.year,
                ele=electron,
                ele_veto=vetoelectron,
                mu=muon,
                mu_veto=vetomuon,
                jet_all=jet,
                jet_central=central,
                jet_btag=btag,
                jet_fwd=fwd,
                met=met,
            )

            BL = sel.dilep_baseline(SS=False)
            BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])

            output['N_b_' + var].fill(dataset=dataset,
                                      multiplicity=ak.num(btag)[BL_minusNb],
                                      weight=weight.weight()[BL_minusNb])

    return output
# Load the NanoAOD file (path from the parsed CLI args) with the custom schema.
events = NanoEventsFactory.from_root(args.file, schemaclass=HackSchema).events()

# Object selection: jets, b-tagged jets, muons and electrons with simple
# pt/eta thresholds; the DeepFlavB cut 0.642 is the b-tag working point used
# here (TODO confirm which official WP this corresponds to).
jets = events.Jet
jetSel = (jets.pt > 30) & (abs(jets.eta) < 2.4)
tightJet = jets[jetSel]
bJet = tightJet[tightJet.btagDeepFlavB > 0.642]
muons = events.Muon
muonSel = (muons.pt > 30) & (abs(muons.eta) < 2.4)
tightMuon = muons[muonSel]
ele = events.Electron
eleSel = (ele.pt > 35) & (abs(ele.eta) < 2.4)
tightEle = ele[eleSel]

# Event selection: exactly one tight muon OR exactly one tight electron,
# at least 3 tight jets, at least 1 b-tagged jet.
eventSel = (((ak.num(tightMuon) == 1) | (ak.num(tightEle) == 1)) &
            (ak.num(tightJet) >= 3) & (ak.num(bJet) >= 1))
final = events[eventSel]
print("initial cuts made")

#Finding Gen Part Events
genPart = final.GenPart
tops = genPart[abs(genPart.pdgId) == 6]  # top quarks (|pdgId| == 6)

#The isLastCopy Flag filters out copy Genparticles:
tops = tops[tops.hasFlags('isLastCopy')]
# Decay products of the tops; keep only last copies again.
tDecay = tops.distinctChildren
tDecay = tDecay[tDecay.hasFlags('isLastCopy')]
t_Events = tDecay[abs(tDecay.pdgId) == 5]  # b quarks from the top decay
W = tDecay[abs(tDecay.pdgId) == 24]        # W bosons from the top decay
W = W[W.hasFlags('isLastCopy')]
def process(self, events):
    """Process one chunk of NanoAOD events for the OS dilepton analysis.

    Uses the shared ``Selection`` helper for the baseline, fills the nominal
    histograms (with N-1 versions for the b-tag, forward-jet and MET plots),
    stores run/lumi/event numbers for data, and for MC refills the
    jet-dependent histograms for every jet-energy variation in
    ``self.variations``.

    Fixes relative to the previous revision:
      * ``N_mu`` was filled with the *electron* multiplicity (copy-paste bug);
        it now uses ``ak.num(muon)``.
      * ``high_score_btag`` sorted the b-tag score ascending (awkward's
        default), i.e. the ``b1``/``b2`` histograms were filled with the two
        LOWEST-score jets; it now sorts descending.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet) > 2
    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    ## Muons
    muon = Collections(ev, "Muon", "tightSSTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) > 0, axis=1)
    OSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) < 0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightSSTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) > 0, axis=1)
    OSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) < 0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    lepton = ak.concatenate([muon, electron], axis=1)
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) > 0, axis=1)
    OSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) < 0, axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    # need to sort wrt smeared and recorrected jet pt
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)]
    jet = jet[~match(jet, muon, deltaRCut=0.4)]      # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta) < 2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    ## forward jets
    high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]            # highest momentum spectator
    high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt_nom, axis=1))]      # highest transverse momentum spectator
    high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]   # most forward spectator

    ## Get the two leading b-jets in terms of btag score
    # FIX: ak.argsort is ascending by default, which previously selected the
    # two lowest-score jets; sort descending to take the leading scores.
    high_score_btag = central[ak.argsort(central.btagDeepFlavB, ascending=False)][:, :2]

    jf = cross(high_p_fwd, jet)
    mjf = (jf['0'] + jf['1']).mass
    deltaEta = abs(high_p_fwd.eta - jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'].eta)
    deltaEtaMax = ak.max(deltaEta, axis=1)
    mjf_max = ak.max(mjf, axis=1)

    jj = choose(jet, 2)
    mjj_max = ak.max((jj['0'] + jj['1']).mass, axis=1)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    ht_central = ak.sum(central.pt, axis=1)

    # define the weight (unity for data datasets matched by the regex)
    weight = Weights(len(ev))
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # lumi weight
        weight.add("weight", ev.weight * cfg['lumi'][self.year])
        # PU weight - not in the babies...
        weight.add("PU",
                   ev.puWeight,
                   weightUp=ev.puWeightUp,
                   weightDown=ev.puWeightDown,
                   shift=False)
        # b-tag SFs
        weight.add("btag", self.btagSF.Method1a(btag, light))
        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    cutflow = Cutflow(output, ev, weight=weight)

    # shared baseline-selection helper; holds all object collections
    sel = Selection(
        dataset=dataset,
        events=ev,
        year=self.year,
        ele=electron,
        ele_veto=vetoelectron,
        mu=muon,
        mu_veto=vetomuon,
        jet_all=jet,
        jet_central=central,
        jet_btag=btag,
        jet_fwd=fwd,
        met=ev.MET,
    )

    BL = sel.dilep_baseline(cutflow=cutflow, SS=False)

    # first, make a few super inclusive plots
    output['PV_npvs'].fill(dataset=dataset,
                           multiplicity=ev.PV[BL].npvs,
                           weight=weight.weight()[BL])
    output['PV_npvsGood'].fill(dataset=dataset,
                               multiplicity=ev.PV[BL].npvsGood,
                               weight=weight.weight()[BL])
    output['N_jet'].fill(dataset=dataset,
                         multiplicity=ak.num(jet)[BL],
                         weight=weight.weight()[BL])

    # N-1 fill: baseline without the b-tag requirement
    BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])
    output['N_b'].fill(dataset=dataset,
                       multiplicity=ak.num(btag)[BL_minusNb],
                       weight=weight.weight()[BL_minusNb])

    output['N_central'].fill(dataset=dataset,
                             multiplicity=ak.num(central)[BL],
                             weight=weight.weight()[BL])
    output['N_ele'].fill(dataset=dataset,
                         multiplicity=ak.num(electron)[BL],
                         weight=weight.weight()[BL])
    # FIX: previously filled with ak.num(electron) (copy-paste bug)
    output['N_mu'].fill(dataset=dataset,
                        multiplicity=ak.num(muon)[BL],
                        weight=weight.weight()[BL])

    # N-1 fill: baseline without the forward-jet requirement
    BL_minusFwd = sel.dilep_baseline(SS=False, omit=['N_fwd>0'])
    output['N_fwd'].fill(dataset=dataset,
                         multiplicity=ak.num(fwd)[BL_minusFwd],
                         weight=weight.weight()[BL_minusFwd])

    # N-1 fill: baseline without the MET requirement
    BL_minusMET = sel.dilep_baseline(SS=False, omit=['MET>50'])
    output['MET'].fill(dataset=dataset,
                       pt=ev.MET[BL_minusMET].pt,
                       phi=ev.MET[BL_minusMET].phi,
                       weight=weight.weight()[BL_minusMET])

    output['lead_lep'].fill(dataset=dataset,
                            pt=ak.to_numpy(ak.flatten(leading_lepton[BL].pt)),
                            eta=ak.to_numpy(ak.flatten(leading_lepton[BL].eta)),
                            phi=ak.to_numpy(ak.flatten(leading_lepton[BL].phi)),
                            weight=weight.weight()[BL])
    output['trail_lep'].fill(dataset=dataset,
                             pt=ak.to_numpy(ak.flatten(trailing_lepton[BL].pt)),
                             eta=ak.to_numpy(ak.flatten(trailing_lepton[BL].eta)),
                             phi=ak.to_numpy(ak.flatten(trailing_lepton[BL].phi)),
                             weight=weight.weight()[BL])
    output['fwd_jet'].fill(dataset=dataset,
                           pt=ak.flatten(high_p_fwd[BL].pt_nom),
                           eta=ak.flatten(high_p_fwd[BL].eta),
                           phi=ak.flatten(high_p_fwd[BL].phi),
                           weight=weight.weight()[BL])
    output['b1'].fill(dataset=dataset,
                      pt=ak.flatten(high_score_btag[:, 0:1][BL].pt_nom),
                      eta=ak.flatten(high_score_btag[:, 0:1][BL].eta),
                      phi=ak.flatten(high_score_btag[:, 0:1][BL].phi),
                      weight=weight.weight()[BL])
    output['b2'].fill(dataset=dataset,
                      pt=ak.flatten(high_score_btag[:, 1:2][BL].pt_nom),
                      eta=ak.flatten(high_score_btag[:, 1:2][BL].eta),
                      phi=ak.flatten(high_score_btag[:, 1:2][BL].phi),
                      weight=weight.weight()[BL])
    output['j1'].fill(dataset=dataset,
                      pt=ak.flatten(jet.pt_nom[:, 0:1][BL]),
                      eta=ak.flatten(jet.eta[:, 0:1][BL]),
                      phi=ak.flatten(jet.phi[:, 0:1][BL]),
                      weight=weight.weight()[BL])
    output['j2'].fill(dataset=dataset,
                      pt=ak.flatten(jet[:, 1:2][BL].pt_nom),
                      eta=ak.flatten(jet[:, 1:2][BL].eta),
                      phi=ak.flatten(jet[:, 1:2][BL].phi),
                      weight=weight.weight()[BL])
    output['j3'].fill(dataset=dataset,
                      pt=ak.flatten(jet[:, 2:3][BL].pt_nom),
                      eta=ak.flatten(jet[:, 2:3][BL].eta),
                      phi=ak.flatten(jet[:, 2:3][BL].phi),
                      weight=weight.weight()[BL])

    if re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # data: store event identifiers of selected events for later cross-checks
        run_ = ak.to_numpy(ev.run)
        lumi_ = ak.to_numpy(ev.luminosityBlock)
        event_ = ak.to_numpy(ev.event)
        output['%s_run' % dataset] += processor.column_accumulator(run_[BL])
        output['%s_lumi' % dataset] += processor.column_accumulator(lumi_[BL])
        output['%s_event' % dataset] += processor.column_accumulator(event_[BL])

    # Now, take care of systematic unceratinties (MC only)
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        alljets = getJets(ev, minPt=0, maxEta=4.7)
        alljets = alljets[(alljets.jetId > 1)]
        for var in self.variations:
            # get the collections that change with the variations
            jet = getPtEtaPhi(alljets, pt_var=var)
            jet = jet[(jet.pt > 25)]
            jet = jet[~match(jet, muon, deltaRCut=0.4)]      # remove jets that overlap with muons
            jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons
            central = jet[(abs(jet.eta) < 2.4)]
            btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
            light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
            fwd = getFwdJet(light)
            fwd_noPU = getFwdJet(light, puId=False)

            ## forward jets
            high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]           # highest momentum spectator
            high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt, axis=1))]         # highest transverse momentum spectator
            high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]  # most forward spectator

            ## Get the two leading b-jets in terms of btag score (descending, see above)
            high_score_btag = central[ak.argsort(central.btagDeepFlavB, ascending=False)][:, :2]

            # swap the varied pt into the MET record before rebuilding the selection
            # NOTE(review): this mutates ev.MET in place — confirm that is intended.
            met = ev.MET
            met['pt'] = getattr(met, var)

            sel = Selection(
                dataset=dataset,
                events=ev,
                year=self.year,
                ele=electron,
                ele_veto=vetoelectron,
                mu=muon,
                mu_veto=vetomuon,
                jet_all=jet,
                jet_central=central,
                jet_btag=btag,
                jet_fwd=fwd,
                met=met,
            )

            BL = sel.dilep_baseline(SS=False)

            output['N_jet_' + var].fill(dataset=dataset,
                                        multiplicity=ak.num(jet)[BL],
                                        weight=weight.weight()[BL])

            BL_minusFwd = sel.dilep_baseline(SS=False, omit=['N_fwd>0'])
            output['N_fwd_' + var].fill(dataset=dataset,
                                        multiplicity=ak.num(fwd)[BL_minusFwd],
                                        weight=weight.weight()[BL_minusFwd])

            BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])
            output['N_b_' + var].fill(dataset=dataset,
                                      multiplicity=ak.num(btag)[BL_minusNb],
                                      weight=weight.weight()[BL_minusNb])

            output['N_central_' + var].fill(dataset=dataset,
                                            multiplicity=ak.num(central)[BL],
                                            weight=weight.weight()[BL])

            # We don't need to redo all plots with variations. E.g., just add uncertainties to the jet plots.
            output['j1_' + var].fill(dataset=dataset,
                                     pt=ak.flatten(jet.pt[:, 0:1][BL]),
                                     eta=ak.flatten(jet.eta[:, 0:1][BL]),
                                     phi=ak.flatten(jet.phi[:, 0:1][BL]),
                                     weight=weight.weight()[BL])
            output['b1_' + var].fill(dataset=dataset,
                                     pt=ak.flatten(high_score_btag[:, 0:1].pt[:, 0:1][BL]),
                                     eta=ak.flatten(high_score_btag[:, 0:1].eta[:, 0:1][BL]),
                                     phi=ak.flatten(high_score_btag[:, 0:1].phi[:, 0:1][BL]),
                                     weight=weight.weight()[BL])
            output['fwd_jet_' + var].fill(dataset=dataset,
                                          pt=ak.flatten(high_p_fwd[BL].pt),
                                          eta=ak.flatten(high_p_fwd[BL].eta),
                                          phi=ak.flatten(high_p_fwd[BL].phi),
                                          weight=weight.weight()[BL])

            BL_minusMET = sel.dilep_baseline(SS=False, omit=['MET>50'])
            output['MET_' + var].fill(dataset=dataset,
                                      pt=getattr(ev.MET, var)[BL_minusMET],
                                      phi=ev.MET[BL_minusMET].phi,
                                      weight=weight.weight()[BL_minusMET])

    return output