def embed_crossref(source, idx_name, dest, dest_name):
    """Embed a cross-reference into ``source``.

    Adds a field ``dest_name`` to ``source`` holding, for each source record,
    the ``dest`` record pointed to by the per-event index ``source[idx_name]``.

    Parameters
    ----------
    source : ak.Array
        any array with shape N * var * {record}
    idx_name : str
        A field in the source record holding per-event indices into ``dest``
    dest : ak.Array
        any array with shape N * var * {record}, where:
        ``ak.max(source[idx_name], axis=1) < ak.num(dest)`` and
        ``ak.min(source[idx_name], axis=1) >= 0``
    dest_name : str
        Name of the new field created on ``source``
    """
    # Validate the index range up front so a bad cross-reference fails loudly
    # instead of producing a silently wrong gather.
    assert ak.all(ak.max(source[idx_name], axis=1) < ak.num(dest))
    assert ak.all(ak.min(source[idx_name], axis=1) >= 0)
    # Turn the per-event indices into global indices into the flattened dest
    # by offsetting each event's indices with that event's start position.
    id_global = ak.flatten(
        source[idx_name] + np.asarray(dest.layout.starts), axis=None
    )
    # Gather the referenced dest records and rewrap them with source's own
    # list structure so the new field lines up record-by-record with source.
    source[dest_name] = ak.Array(
        ak.layout.ListOffsetArray64(
            source.layout.offsets,
            ak.layout.ListOffsetArray64(
                source.layout.content.offsets,
                ak.flatten(dest)[id_global].layout,
            ),
        )
    )
def test_ByteMaskedArray():
    """Check max/argmax reducers on a ByteMaskedArray, bare and under a list layout."""
    content = awkward1.Array([1.1, 2.2, 3.3, 999, 999, 4.4, 5.5]).layout
    mask = awkward1.layout.Index8(
        numpy.array([False, False, False, True, True, False, False]))
    # valid_when=False: True in the mask means the value is masked out (None).
    bytemaskedarray = awkward1.layout.ByteMaskedArray(mask, content,
                                                      valid_when=False)
    array = awkward1.Array(bytemaskedarray)
    assert array.tolist() == [1.1, 2.2, 3.3, None, None, 4.4, 5.5]
    # axis=0 reductions yield Python scalars, so direct comparison is fine.
    assert awkward1.max(array, axis=0) == 5.5
    assert awkward1.argmax(array, axis=0) == 6

    offsets = awkward1.layout.Index64(
        numpy.array([0, 2, 4, 7], dtype=numpy.int64))
    listoffsetarray = awkward1.layout.ListOffsetArray64(offsets,
                                                        bytemaskedarray)
    array = awkward1.Array(listoffsetarray)
    assert array.tolist() == [[1.1, 2.2], [3.3, None], [None, 4.4, 5.5]]
    # axis=1 reductions return an awkward Array; `== [...]` on it is an
    # elementwise comparison, and truth-testing a length-3 array raises.
    # Convert to a Python list before asserting.
    assert awkward1.to_list(awkward1.max(array, axis=1)) == [2.2, 3.3, 5.5]
    assert awkward1.to_list(awkward1.argmax(array, axis=1)) == [1, 0, 2]
def process(self, events):
    """Same-sign dilepton analysis: object selection, ONNX NN evaluation,
    and histogram filling.

    Fixes vs. previous version:
    - 'N_mu' is now filled with the muon multiplicity (it previously re-used
      the electron count by copy-paste).
    - The data-vs-MC regex check is evaluated once instead of four times.
    - Dead `if True:` wrapper around the NN section removed.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet) > 2

    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    # data datasets are identified by name; everything else is MC
    is_data = re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset) is not None

    if not is_data:
        ## Generated leptons
        gen_lep = ev.GenL
        leading_gen_lep = gen_lep[ak.singletons(ak.argmax(gen_lep.pt, axis=1))]
        trailing_gen_lep = gen_lep[ak.singletons(ak.argmin(gen_lep.pt, axis=1))]

    ## Muons
    muon = Collections(ev, "Muon", "tightSSTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) > 0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightSSTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) > 0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) > 0, axis=1)

    lepton = ak.concatenate([muon, electron], axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]

    dilepton_mass = (leading_lepton + trailing_lepton).mass
    dilepton_pt = (leading_lepton + trailing_lepton).pt
    dilepton_dR = delta_r(leading_lepton, trailing_lepton)

    # pdgIds of the two leading leptons, padded with 0 for short events
    lepton_pdgId_pt_ordered = ak.fill_none(
        ak.pad_none(lepton[ak.argsort(lepton.pt, ascending=False)].pdgId, 2, clip=True), 0)

    if not is_data:
        n_nonprompt = getNonPromptFromFlavour(electron) + getNonPromptFromFlavour(muon)
        n_chargeflip = getChargeFlips(electron, ev.GenPart) + getChargeFlips(muon, ev.GenPart)

    mt_lep_met = mt(lepton.pt, lepton.phi, ev.MET.pt, ev.MET.phi)
    min_mt_lep_met = ak.min(mt_lep_met, axis=1)

    ## Tau and other stuff
    tau = getTaus(ev)
    track = getIsoTracks(ev)

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)]  # need to sort wrt smeared and recorrected jet pt
    jet = jet[~match(jet, muon, deltaRCut=0.4)]  # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta) < 2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:, :2]

    bl = cross(lepton, high_score_btag)
    bl_dR = delta_r(bl['0'], bl['1'])
    min_bl_dR = ak.min(bl_dR, axis=1)

    ## forward jets
    j_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]  # highest momentum spectator

    jf = cross(j_fwd, jet)
    mjf = (jf['0'] + jf['1']).mass
    # this is the jet that forms the largest invariant mass with j_fwd
    j_fwd2 = jf[ak.singletons(ak.argmax(mjf, axis=1))]['1']
    delta_eta = abs(j_fwd2.eta - j_fwd.eta)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)

    # define the weight
    weight = Weights(len(ev))

    if not is_data:
        # lumi weight
        weight.add("weight", ev.weight * cfg['lumi'][self.year])
        # PU weight - not in the babies...
        weight.add("PU", ev.puWeight, weightUp=ev.puWeightUp,
                   weightDown=ev.puWeightDown, shift=False)
        # b-tag SFs
        weight.add("btag", self.btagSF.Method1a(btag, light))
        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    cutflow = Cutflow(output, ev, weight=weight)

    sel = Selection(
        dataset=dataset,
        events=ev,
        year=self.year,
        ele=electron,
        ele_veto=vetoelectron,
        mu=muon,
        mu_veto=vetomuon,
        jet_all=jet,
        jet_central=central,
        jet_btag=btag,
        jet_fwd=fwd,
        met=ev.MET,
    )

    BL = sel.dilep_baseline(cutflow=cutflow, SS=True)
    weight_BL = weight.weight()[BL]

    # define the inputs to the NN
    # this is super stupid. there must be a better way.
    NN_inputs = np.stack([
        ak.to_numpy(ak.num(jet[BL])),
        ak.to_numpy(ak.num(tau[BL])),
        ak.to_numpy(ak.num(track[BL])),
        ak.to_numpy(st[BL]),
        ak.to_numpy(ev.MET[BL].pt),
        ak.to_numpy(ak.max(mjf[BL], axis=1)),
        ak.to_numpy(pad_and_flatten(delta_eta[BL])),
        ak.to_numpy(pad_and_flatten(leading_lepton[BL].pt)),
        ak.to_numpy(pad_and_flatten(leading_lepton[BL].eta)),
        ak.to_numpy(pad_and_flatten(trailing_lepton[BL].pt)),
        ak.to_numpy(pad_and_flatten(trailing_lepton[BL].eta)),
        ak.to_numpy(pad_and_flatten(dilepton_mass[BL])),
        ak.to_numpy(pad_and_flatten(dilepton_pt[BL])),
        ak.to_numpy(pad_and_flatten(j_fwd[BL].pt)),
        ak.to_numpy(pad_and_flatten(j_fwd[BL].p)),
        ak.to_numpy(pad_and_flatten(j_fwd[BL].eta)),
        ak.to_numpy(pad_and_flatten(jet[:, 0:1][BL].pt)),
        ak.to_numpy(pad_and_flatten(jet[:, 1:2][BL].pt)),
        ak.to_numpy(pad_and_flatten(jet[:, 0:1][BL].eta)),
        ak.to_numpy(pad_and_flatten(jet[:, 1:2][BL].eta)),
        ak.to_numpy(pad_and_flatten(high_score_btag[:, 0:1][BL].pt)),
        ak.to_numpy(pad_and_flatten(high_score_btag[:, 1:2][BL].pt)),
        ak.to_numpy(pad_and_flatten(high_score_btag[:, 0:1][BL].eta)),
        ak.to_numpy(pad_and_flatten(high_score_btag[:, 1:2][BL].eta)),
        ak.to_numpy(min_bl_dR[BL]),
        ak.to_numpy(min_mt_lep_met[BL]),
    ])

    NN_inputs = np.moveaxis(NN_inputs, 0, 1)  # (features, events) -> (events, features)

    model, scaler = load_onnx_model('v8')

    try:
        NN_inputs_scaled = scaler.transform(NN_inputs)
        NN_pred = predict_onnx(model, NN_inputs_scaled)
        best_score = np.argmax(NN_pred, axis=1)
    except ValueError:
        # empty NN_inputs: no events pass the baseline selection
        NN_pred = np.array([])
        best_score = np.array([])
        NN_inputs_scaled = NN_inputs

    output['node'].fill(dataset=dataset, multiplicity=best_score, weight=weight_BL)

    # guard against an empty prediction array when slicing per-node scores
    has_pred = np.shape(NN_pred)[0] > 0
    output['node0_score_incl'].fill(dataset=dataset,
                                    score=NN_pred[:, 0] if has_pred else np.array([]),
                                    weight=weight_BL)
    output['node0_score'].fill(dataset=dataset,
                               score=NN_pred[best_score == 0][:, 0] if has_pred else np.array([]),
                               weight=weight_BL[best_score == 0])
    output['node1_score'].fill(dataset=dataset,
                               score=NN_pred[best_score == 1][:, 1] if has_pred else np.array([]),
                               weight=weight_BL[best_score == 1])
    output['node2_score'].fill(dataset=dataset,
                               score=NN_pred[best_score == 2][:, 2] if has_pred else np.array([]),
                               weight=weight_BL[best_score == 2])
    output['node3_score'].fill(dataset=dataset,
                               score=NN_pred[best_score == 3][:, 3] if has_pred else np.array([]),
                               weight=weight_BL[best_score == 3])
    output['node4_score'].fill(dataset=dataset,
                               score=NN_pred[best_score == 4][:, 4] if has_pred else np.array([]),
                               weight=weight_BL[best_score == 4])

    # signal-region split by leading-lepton charge (pdgId<0 means positive lepton)
    SR_sel_pp = ((best_score == 0) & ak.flatten((leading_lepton[BL].pdgId < 0)))
    SR_sel_mm = ((best_score == 0) & ak.flatten((leading_lepton[BL].pdgId > 0)))
    leading_lepton_BL = leading_lepton[BL]

    output['lead_lep_SR_pp'].fill(
        dataset=dataset,
        pt=ak.to_numpy(ak.flatten(leading_lepton_BL[SR_sel_pp].pt)),
        weight=weight_BL[SR_sel_pp],
    )
    output['lead_lep_SR_mm'].fill(
        dataset=dataset,
        pt=ak.to_numpy(ak.flatten(leading_lepton_BL[SR_sel_mm].pt)),
        weight=weight_BL[SR_sel_mm],
    )

    del model
    del scaler
    del NN_inputs, NN_inputs_scaled, NN_pred

    # first, make a few super inclusive plots
    output['PV_npvs'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvs, weight=weight_BL)
    output['PV_npvsGood'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvsGood, weight=weight_BL)
    output['N_jet'].fill(dataset=dataset, multiplicity=ak.num(jet)[BL], weight=weight_BL)
    output['N_b'].fill(dataset=dataset, multiplicity=ak.num(btag)[BL], weight=weight_BL)
    output['N_central'].fill(dataset=dataset, multiplicity=ak.num(central)[BL], weight=weight_BL)
    output['N_ele'].fill(dataset=dataset, multiplicity=ak.num(electron)[BL], weight=weight_BL)
    # FIX: was filled with the electron count
    output['N_mu'].fill(dataset=dataset, multiplicity=ak.num(muon)[BL], weight=weight_BL)
    output['N_fwd'].fill(dataset=dataset, multiplicity=ak.num(fwd)[BL], weight=weight_BL)
    output['ST'].fill(dataset=dataset, pt=st[BL], weight=weight_BL)
    output['HT'].fill(dataset=dataset, pt=ht[BL], weight=weight_BL)

    if not is_data:
        output['nLepFromTop'].fill(dataset=dataset, multiplicity=ev[BL].nLepFromTop, weight=weight_BL)
        output['nLepFromTau'].fill(dataset=dataset, multiplicity=ev.nLepFromTau[BL], weight=weight_BL)
        output['nLepFromZ'].fill(dataset=dataset, multiplicity=ev.nLepFromZ[BL], weight=weight_BL)
        output['nLepFromW'].fill(dataset=dataset, multiplicity=ev.nLepFromW[BL], weight=weight_BL)
        output['nGenTau'].fill(dataset=dataset, multiplicity=ev.nGenTau[BL], weight=weight_BL)
        output['nGenL'].fill(dataset=dataset, multiplicity=ak.num(ev.GenL[BL], axis=1), weight=weight_BL)
        output['chargeFlip_vs_nonprompt'].fill(dataset=dataset, n1=n_chargeflip[BL], n2=n_nonprompt[BL],
                                               n_ele=ak.num(electron)[BL], weight=weight_BL)

    output['MET'].fill(
        dataset=dataset,
        pt=ev.MET[BL].pt,
        phi=ev.MET[BL].phi,
        weight=weight_BL,
    )

    if not is_data:
        output['lead_gen_lep'].fill(
            dataset=dataset,
            pt=ak.to_numpy(ak.flatten(leading_gen_lep[BL].pt)),
            eta=ak.to_numpy(ak.flatten(leading_gen_lep[BL].eta)),
            phi=ak.to_numpy(ak.flatten(leading_gen_lep[BL].phi)),
            weight=weight_BL,
        )
        output['trail_gen_lep'].fill(
            dataset=dataset,
            pt=ak.to_numpy(ak.flatten(trailing_gen_lep[BL].pt)),
            eta=ak.to_numpy(ak.flatten(trailing_gen_lep[BL].eta)),
            phi=ak.to_numpy(ak.flatten(trailing_gen_lep[BL].phi)),
            weight=weight_BL,
        )

    output['lead_lep'].fill(
        dataset=dataset,
        pt=ak.to_numpy(ak.flatten(leading_lepton[BL].pt)),
        eta=ak.to_numpy(ak.flatten(leading_lepton[BL].eta)),
        phi=ak.to_numpy(ak.flatten(leading_lepton[BL].phi)),
        weight=weight_BL,
    )
    output['trail_lep'].fill(
        dataset=dataset,
        pt=ak.to_numpy(ak.flatten(trailing_lepton[BL].pt)),
        eta=ak.to_numpy(ak.flatten(trailing_lepton[BL].eta)),
        phi=ak.to_numpy(ak.flatten(trailing_lepton[BL].phi)),
        weight=weight_BL,
    )
    output['j1'].fill(
        dataset=dataset,
        pt=ak.flatten(jet.pt_nom[:, 0:1][BL]),
        eta=ak.flatten(jet.eta[:, 0:1][BL]),
        phi=ak.flatten(jet.phi[:, 0:1][BL]),
        weight=weight_BL,
    )
    output['j2'].fill(
        dataset=dataset,
        pt=ak.flatten(jet[:, 1:2][BL].pt_nom),
        eta=ak.flatten(jet[:, 1:2][BL].eta),
        phi=ak.flatten(jet[:, 1:2][BL].phi),
        weight=weight_BL,
    )
    output['j3'].fill(
        dataset=dataset,
        pt=ak.flatten(jet[:, 2:3][BL].pt_nom),
        eta=ak.flatten(jet[:, 2:3][BL].eta),
        phi=ak.flatten(jet[:, 2:3][BL].phi),
        weight=weight_BL,
    )
    output['fwd_jet'].fill(
        dataset=dataset,
        pt=ak.flatten(j_fwd[BL].pt),
        eta=ak.flatten(j_fwd[BL].eta),
        phi=ak.flatten(j_fwd[BL].phi),
        weight=weight_BL,
    )
    output['high_p_fwd_p'].fill(dataset=dataset, p=ak.flatten(j_fwd[BL].p), weight=weight_BL)

    return output
def process(self, events):
    """Opposite-sign dilepton baseline with b-tag scale-factor variations.

    Selects OS dilepton events, fills the nominal ``N_b`` histogram, records
    run/lumi/event numbers for data, and (for MC) refills ``N_b_<var>`` once
    per b-tag SF variation in ``self.variations`` with the varied weight.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet) > 2

    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    ## Muons
    muon = Collections(ev, "Muon", "tightSSTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) > 0, axis=1)
    OSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) < 0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightSSTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any(
        (dielectron['0'].charge * dielectron['1'].charge) > 0, axis=1)
    OSelectron = ak.any(
        (dielectron['0'].charge * dielectron['1'].charge) < 0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    lepton = ak.concatenate([muon, electron], axis=1)
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) > 0,
                      axis=1)
    OSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) < 0,
                      axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]
    # NOTE(review): combining the argmax/argmin singleton index arrays with
    # `&` and negating looks intended to select the non-leading, non-trailing
    # lepton — verify this mask does what is expected.
    second_lepton = lepton[~(trailing_lepton_idx & leading_lepton_idx)]

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    jet = jet[ak.argsort(
        jet.pt_nom, ascending=False
    )]  # need to sort wrt smeared and recorrected jet pt
    jet = jet[~match(jet, muon, deltaRCut=0.4)]  # remove jets that overlap with muons
    jet = jet[~match(
        jet, electron,
        deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta) < 2.4)]
    btag = getBTagsDeepFlavB(
        jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    ## forward jets
    high_p_fwd = fwd[ak.singletons(ak.argmax(
        fwd.p, axis=1))]  # highest momentum spectator
    high_pt_fwd = fwd[ak.singletons(ak.argmax(
        fwd.pt_nom, axis=1))]  # highest transverse momentum spectator
    high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(
        fwd.eta), axis=1))]  # most forward spectator

    ## Get the two leading b-jets in terms of btag score
    high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:, :2]

    jf = cross(high_p_fwd, jet)
    mjf = (jf['0'] + jf['1']).mass
    # delta-eta between the forward jet and the jet giving the largest mjf
    deltaEta = abs(high_p_fwd.eta -
                   jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'].eta)
    deltaEtaMax = ak.max(deltaEta, axis=1)
    mjf_max = ak.max(mjf, axis=1)

    jj = choose(jet, 2)
    mjj_max = ak.max((jj['0'] + jj['1']).mass, axis=1)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    lt = met_pt + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    ht_central = ak.sum(central.pt, axis=1)

    # define the weight
    weight = Weights(len(ev))

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'),
                     dataset):
        # lumi weight
        weight.add("weight", ev.weight * cfg['lumi'][self.year])

        # PU weight - not in the babies...
        weight.add("PU",
                   ev.puWeight,
                   weightUp=ev.puWeightUp,
                   weightDown=ev.puWeightDown,
                   shift=False)

        # b-tag SFs
        weight.add(
            "btag",
            self.btagSF.Method1a(btag,
                                 light,
                                 b_direction='central',
                                 c_direction='central'))

        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    sel = Selection(
        dataset=dataset,
        events=ev,
        year=self.year,
        ele=electron,
        ele_veto=vetoelectron,
        mu=muon,
        mu_veto=vetomuon,
        jet_all=jet,
        jet_central=central,
        jet_btag=btag,
        jet_fwd=fwd,
        met=ev.MET,
    )

    BL = sel.dilep_baseline(SS=False)

    # nominal N_b, with the N_btag cut itself removed from the baseline
    BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])
    output['N_b'].fill(dataset=dataset,
                       multiplicity=ak.num(btag)[BL_minusNb],
                       weight=weight.weight()[BL_minusNb])

    if re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # data only: record run/lumi/event of selected events
        #rle = ak.to_numpy(ak.zip([ev.run, ev.luminosityBlock, ev.event]))
        run_ = ak.to_numpy(ev.run)
        lumi_ = ak.to_numpy(ev.luminosityBlock)
        event_ = ak.to_numpy(ev.event)
        output['%s_run' % dataset] += processor.column_accumulator(
            run_[BL])
        output['%s_lumi' % dataset] += processor.column_accumulator(
            lumi_[BL])
        output['%s_event' % dataset] += processor.column_accumulator(
            event_[BL])

    # Now, take care of systematic unceratinties
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'),
                     dataset):
        alljets = getJets(ev, minPt=0, maxEta=4.7)
        alljets = alljets[(alljets.jetId > 1)]
        for var in self.variations:
            # get the collections that change with the variations
            # (only the b-tag weight varies here; jets stay nominal)
            btag = getBTagsDeepFlavB(
                jet, year=self.year)  # should study working point for DeepJet

            # rebuild the weight from scratch for each variation
            weight = Weights(len(ev))
            weight.add("weight", ev.weight * cfg['lumi'][self.year])
            weight.add("PU",
                       ev.puWeight,
                       weightUp=ev.puWeightUp,
                       weightDown=ev.puWeightDown,
                       shift=False)
            if var == 'centralUp':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='central',
                                         c_direction='up'))
            elif var == 'centralDown':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='central',
                                         c_direction='down'))
            elif var == 'upCentral':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='up',
                                         c_direction='central'))
            elif var == 'downCentral':
                weight.add(
                    "btag",
                    self.btagSF.Method1a(btag,
                                         light,
                                         b_direction='down',
                                         c_direction='central'))

            weight.add("lepton", self.leptonSF.get(electron, muon))

            met = ev.MET
            sel = Selection(
                dataset=dataset,
                events=ev,
                year=self.year,
                ele=electron,
                ele_veto=vetoelectron,
                mu=muon,
                mu_veto=vetomuon,
                jet_all=jet,
                jet_central=central,
                jet_btag=btag,
                jet_fwd=fwd,
                met=met,
            )

            BL = sel.dilep_baseline(SS=False)
            BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])
            output['N_b_' + var].fill(
                dataset=dataset,
                multiplicity=ak.num(btag)[BL_minusNb],
                weight=weight.weight()[BL_minusNb])

    return output
def process(self, events):
    """Opposite-sign dilepton analysis with jet-pt systematic variations.

    Fills nominal control histograms, records run/lumi/event for data, and
    for MC re-derives the jet collections for each pt variation in
    ``self.variations`` and refills the varied histograms.

    Fixes vs. previous version:
    - 'N_mu' is now filled with the muon multiplicity (it previously re-used
      the electron count by copy-paste).
    - The data-vs-MC regex check is evaluated once.
    - Large commented-out code blocks removed.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet) > 2

    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    is_data = re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset) is not None

    ## Muons
    muon = Collections(ev, "Muon", "tightSSTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) > 0, axis=1)
    OSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) < 0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightSSTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) > 0, axis=1)
    OSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) < 0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    lepton = ak.concatenate([muon, electron], axis=1)
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) > 0, axis=1)
    OSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) < 0, axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)]  # need to sort wrt smeared and recorrected jet pt
    jet = jet[~match(jet, muon, deltaRCut=0.4)]  # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta) < 2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    ## forward jets
    high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]  # highest momentum spectator
    high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt_nom, axis=1))]  # highest transverse momentum spectator
    high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]  # most forward spectator

    ## Get the two leading b-jets in terms of btag score
    high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:, :2]

    jf = cross(high_p_fwd, jet)
    mjf = (jf['0'] + jf['1']).mass
    deltaEta = abs(high_p_fwd.eta - jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'].eta)
    deltaEtaMax = ak.max(deltaEta, axis=1)
    mjf_max = ak.max(mjf, axis=1)

    jj = choose(jet, 2)
    mjj_max = ak.max((jj['0'] + jj['1']).mass, axis=1)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    ht_central = ak.sum(central.pt, axis=1)

    # define the weight
    weight = Weights(len(ev))

    if not is_data:
        # lumi weight
        weight.add("weight", ev.weight * cfg['lumi'][self.year])
        # PU weight - not in the babies...
        weight.add("PU", ev.puWeight, weightUp=ev.puWeightUp,
                   weightDown=ev.puWeightDown, shift=False)
        # b-tag SFs
        weight.add("btag", self.btagSF.Method1a(btag, light))
        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    cutflow = Cutflow(output, ev, weight=weight)

    sel = Selection(
        dataset=dataset,
        events=ev,
        year=self.year,
        ele=electron,
        ele_veto=vetoelectron,
        mu=muon,
        mu_veto=vetomuon,
        jet_all=jet,
        jet_central=central,
        jet_btag=btag,
        jet_fwd=fwd,
        met=ev.MET,
    )

    BL = sel.dilep_baseline(cutflow=cutflow, SS=False)

    # first, make a few super inclusive plots
    output['PV_npvs'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvs, weight=weight.weight()[BL])
    output['PV_npvsGood'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvsGood, weight=weight.weight()[BL])
    output['N_jet'].fill(dataset=dataset, multiplicity=ak.num(jet)[BL], weight=weight.weight()[BL])

    BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])
    output['N_b'].fill(dataset=dataset, multiplicity=ak.num(btag)[BL_minusNb], weight=weight.weight()[BL_minusNb])

    output['N_central'].fill(dataset=dataset, multiplicity=ak.num(central)[BL], weight=weight.weight()[BL])
    output['N_ele'].fill(dataset=dataset, multiplicity=ak.num(electron)[BL], weight=weight.weight()[BL])
    # FIX: was filled with the electron count
    output['N_mu'].fill(dataset=dataset, multiplicity=ak.num(muon)[BL], weight=weight.weight()[BL])

    BL_minusFwd = sel.dilep_baseline(SS=False, omit=['N_fwd>0'])
    output['N_fwd'].fill(dataset=dataset, multiplicity=ak.num(fwd)[BL_minusFwd], weight=weight.weight()[BL_minusFwd])

    BL_minusMET = sel.dilep_baseline(SS=False, omit=['MET>50'])
    output['MET'].fill(
        dataset=dataset,
        pt=ev.MET[BL_minusMET].pt,
        phi=ev.MET[BL_minusMET].phi,
        weight=weight.weight()[BL_minusMET],
    )

    output['lead_lep'].fill(
        dataset=dataset,
        pt=ak.to_numpy(ak.flatten(leading_lepton[BL].pt)),
        eta=ak.to_numpy(ak.flatten(leading_lepton[BL].eta)),
        phi=ak.to_numpy(ak.flatten(leading_lepton[BL].phi)),
        weight=weight.weight()[BL],
    )
    output['trail_lep'].fill(
        dataset=dataset,
        pt=ak.to_numpy(ak.flatten(trailing_lepton[BL].pt)),
        eta=ak.to_numpy(ak.flatten(trailing_lepton[BL].eta)),
        phi=ak.to_numpy(ak.flatten(trailing_lepton[BL].phi)),
        weight=weight.weight()[BL],
    )
    output['fwd_jet'].fill(
        dataset=dataset,
        pt=ak.flatten(high_p_fwd[BL].pt_nom),
        eta=ak.flatten(high_p_fwd[BL].eta),
        phi=ak.flatten(high_p_fwd[BL].phi),
        weight=weight.weight()[BL],
    )
    output['b1'].fill(
        dataset=dataset,
        pt=ak.flatten(high_score_btag[:, 0:1][BL].pt_nom),
        eta=ak.flatten(high_score_btag[:, 0:1][BL].eta),
        phi=ak.flatten(high_score_btag[:, 0:1][BL].phi),
        weight=weight.weight()[BL],
    )
    output['b2'].fill(
        dataset=dataset,
        pt=ak.flatten(high_score_btag[:, 1:2][BL].pt_nom),
        eta=ak.flatten(high_score_btag[:, 1:2][BL].eta),
        phi=ak.flatten(high_score_btag[:, 1:2][BL].phi),
        weight=weight.weight()[BL],
    )
    output['j1'].fill(
        dataset=dataset,
        pt=ak.flatten(jet.pt_nom[:, 0:1][BL]),
        eta=ak.flatten(jet.eta[:, 0:1][BL]),
        phi=ak.flatten(jet.phi[:, 0:1][BL]),
        weight=weight.weight()[BL],
    )
    output['j2'].fill(
        dataset=dataset,
        pt=ak.flatten(jet[:, 1:2][BL].pt_nom),
        eta=ak.flatten(jet[:, 1:2][BL].eta),
        phi=ak.flatten(jet[:, 1:2][BL].phi),
        weight=weight.weight()[BL],
    )
    output['j3'].fill(
        dataset=dataset,
        pt=ak.flatten(jet[:, 2:3][BL].pt_nom),
        eta=ak.flatten(jet[:, 2:3][BL].eta),
        phi=ak.flatten(jet[:, 2:3][BL].phi),
        weight=weight.weight()[BL],
    )

    if is_data:
        # data only: record run/lumi/event of selected events
        run_ = ak.to_numpy(ev.run)
        lumi_ = ak.to_numpy(ev.luminosityBlock)
        event_ = ak.to_numpy(ev.event)
        output['%s_run' % dataset] += processor.column_accumulator(run_[BL])
        output['%s_lumi' % dataset] += processor.column_accumulator(lumi_[BL])
        output['%s_event' % dataset] += processor.column_accumulator(event_[BL])

    # Now, take care of systematic unceratinties
    if not is_data:
        alljets = getJets(ev, minPt=0, maxEta=4.7)
        alljets = alljets[(alljets.jetId > 1)]
        for var in self.variations:
            # get the collections that change with the variations
            jet = getPtEtaPhi(alljets, pt_var=var)
            jet = jet[(jet.pt > 25)]
            jet = jet[~match(jet, muon, deltaRCut=0.4)]  # remove jets that overlap with muons
            jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

            central = jet[(abs(jet.eta) < 2.4)]
            btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
            light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
            fwd = getFwdJet(light)
            fwd_noPU = getFwdJet(light, puId=False)

            ## forward jets
            high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]  # highest momentum spectator
            high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt, axis=1))]  # highest transverse momentum spectator
            high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]  # most forward spectator

            ## Get the two leading b-jets in terms of btag score
            high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:, :2]

            # MET with the varied pt substituted in
            met = ev.MET
            met['pt'] = getattr(met, var)

            sel = Selection(
                dataset=dataset,
                events=ev,
                year=self.year,
                ele=electron,
                ele_veto=vetoelectron,
                mu=muon,
                mu_veto=vetomuon,
                jet_all=jet,
                jet_central=central,
                jet_btag=btag,
                jet_fwd=fwd,
                met=met,
            )

            BL = sel.dilep_baseline(SS=False)

            # the OS selection remains unchanged
            output['N_jet_' + var].fill(dataset=dataset, multiplicity=ak.num(jet)[BL], weight=weight.weight()[BL])

            BL_minusFwd = sel.dilep_baseline(SS=False, omit=['N_fwd>0'])
            output['N_fwd_' + var].fill(dataset=dataset, multiplicity=ak.num(fwd)[BL_minusFwd], weight=weight.weight()[BL_minusFwd])

            BL_minusNb = sel.dilep_baseline(SS=False, omit=['N_btag>0'])
            output['N_b_' + var].fill(dataset=dataset, multiplicity=ak.num(btag)[BL_minusNb], weight=weight.weight()[BL_minusNb])

            output['N_central_' + var].fill(dataset=dataset, multiplicity=ak.num(central)[BL], weight=weight.weight()[BL])

            # We don't need to redo all plots with variations.
            # E.g., just add uncertainties to the jet plots.
            output['j1_' + var].fill(
                dataset=dataset,
                pt=ak.flatten(jet.pt[:, 0:1][BL]),
                eta=ak.flatten(jet.eta[:, 0:1][BL]),
                phi=ak.flatten(jet.phi[:, 0:1][BL]),
                weight=weight.weight()[BL],
            )
            output['b1_' + var].fill(
                dataset=dataset,
                pt=ak.flatten(high_score_btag[:, 0:1].pt[:, 0:1][BL]),
                eta=ak.flatten(high_score_btag[:, 0:1].eta[:, 0:1][BL]),
                phi=ak.flatten(high_score_btag[:, 0:1].phi[:, 0:1][BL]),
                weight=weight.weight()[BL],
            )
            output['fwd_jet_' + var].fill(
                dataset=dataset,
                pt=ak.flatten(high_p_fwd[BL].pt),
                eta=ak.flatten(high_p_fwd[BL].eta),
                phi=ak.flatten(high_p_fwd[BL].phi),
                weight=weight.weight()[BL],
            )

            BL_minusMET = sel.dilep_baseline(SS=False, omit=['MET>50'])
            output['MET_' + var].fill(
                dataset=dataset,
                pt=getattr(ev.MET, var)[BL_minusMET],
                phi=ev.MET[BL_minusMET].phi,
                weight=weight.weight()[BL_minusMET],
            )

    return output
def process(self, events):
    """Fill all histograms for the opposite-sign dilepton + forward-jet analysis.

    Builds tight/veto lepton collections, cleaned jet collections
    (central / b-tagged / light / forward), applies the OS baseline
    selection and fills the inclusive plots; for MC it repeats the
    jet-dependent selections and plots for every jet-energy variation
    in ``self.variations``.

    Parameters
    ----------
    events : coffea NanoEvents array.

    Returns
    -------
    The accumulator (``self.accumulator.identity()``) with histograms filled.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet) > 2

    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    ## Muons
    muon = Collections(ev, "Muon", "tightTTH").get()
    vetomuon = Collections(ev, "Muon", "vetoTTH").get()
    dimuon = choose(muon, 2)
    SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) > 0, axis=1)
    OSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge) < 0, axis=1)
    leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))
    leading_muon = muon[leading_muon_idx]

    ## Electrons
    electron = Collections(ev, "Electron", "tightTTH").get()
    vetoelectron = Collections(ev, "Electron", "vetoTTH").get()
    dielectron = choose(electron, 2)
    SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) > 0, axis=1)
    OSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge) < 0, axis=1)
    leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))
    leading_electron = electron[leading_electron_idx]

    ## Merge electrons and muons - this should work better now in ak1
    lepton = ak.concatenate([muon, electron], axis=1)
    dilepton = cross(muon, electron)
    SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) > 0, axis=1)
    OSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge) < 0, axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    # need to sort wrt smeared and recorrected jet pt
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)]
    jet = jet[~match(jet, muon, deltaRCut=0.4)]  # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta) < 2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    ## forward jets
    high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]  # highest momentum spectator
    high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt_nom, axis=1))]  # highest transverse momentum spectator
    high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]  # most forward spectator

    ## Get the two leading b-jets in terms of btag score
    # FIX: ak.argsort defaults to ascending order, which selected the two
    # *lowest*-score jets; sort descending to get the highest-score pair.
    high_score_btag = central[ak.argsort(central.btagDeepFlavB, ascending=False)][:, :2]

    jf = cross(high_p_fwd, jet)
    mjf = (jf['0'] + jf['1']).mass
    deltaEta = abs(high_p_fwd.eta - jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'].eta)
    deltaEtaMax = ak.max(deltaEta, axis=1)
    mjf_max = ak.max(mjf, axis=1)

    jj = choose(jet, 2)
    mjj_max = ak.max((jj['0'] + jj['1']).mass, axis=1)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)
    ht_central = ak.sum(central.pt, axis=1)

    ## event selectors
    filters = getFilters(ev, year=self.year, dataset=dataset)
    triggers = getTriggers(ev, year=self.year, dataset=dataset)

    dilep = ((ak.num(electron) == 1) & (ak.num(muon) == 1))
    lep0pt = ((ak.num(electron[(electron.pt > 25)]) + ak.num(muon[(muon.pt > 25)])) > 0)
    lep1pt = ((ak.num(electron[(electron.pt > 20)]) + ak.num(muon[(muon.pt > 20)])) > 1)
    lepveto = ((ak.num(vetoelectron) + ak.num(vetomuon)) == 2)

    # define the weight
    weight = Weights(len(ev))

    if not dataset == 'MuonEG':
        # lumi weight
        weight.add("weight", ev.weight * cfg['lumi'][self.year])
        # PU weight - not in the babies...
        weight.add("PU", ev.puWeight, weightUp=ev.puWeightUp, weightDown=ev.puWeightDown, shift=False)
        # b-tag SFs
        weight.add("btag", self.btagSF.Method1a(btag, light))
        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    selection = PackedSelection()
    selection.add('lepveto', lepveto)
    selection.add('dilep', dilep)
    selection.add('trigger', (triggers))
    selection.add('filter', (filters))
    selection.add('p_T(lep0)>25', lep0pt)
    selection.add('p_T(lep1)>20', lep1pt)
    selection.add('OS', OSlepton)
    selection.add('N_btag=2', (ak.num(btag) == 2))
    selection.add('N_jet>2', (ak.num(jet) >= 3))
    selection.add('N_central>1', (ak.num(central) >= 2))
    selection.add('N_fwd>0', (ak.num(fwd) >= 1))
    selection.add('MET>30', (ev.MET.pt > 30))

    os_reqs = ['lepveto', 'dilep', 'trigger', 'filter', 'p_T(lep0)>25', 'p_T(lep1)>20', 'OS']
    bl_reqs = os_reqs + ['N_btag=2', 'N_jet>2', 'N_central>1', 'N_fwd>0', 'MET>30']

    os_reqs_d = {sel: True for sel in os_reqs}
    os_selection = selection.require(**os_reqs_d)
    bl_reqs_d = {sel: True for sel in bl_reqs}
    BL = selection.require(**bl_reqs_d)

    cutflow = Cutflow(output, ev, weight=weight)
    cutflow_reqs_d = {}
    for req in bl_reqs:
        cutflow_reqs_d.update({req: True})
        cutflow.addRow(req, selection.require(**cutflow_reqs_d))

    # first, make a few super inclusive plots
    output['PV_npvs'].fill(dataset=dataset, multiplicity=ev.PV[os_selection].npvs, weight=weight.weight()[os_selection])
    output['PV_npvsGood'].fill(dataset=dataset, multiplicity=ev.PV[os_selection].npvsGood, weight=weight.weight()[os_selection])
    output['N_jet'].fill(dataset=dataset, multiplicity=ak.num(jet)[os_selection], weight=weight.weight()[os_selection])
    output['N_b'].fill(dataset=dataset, multiplicity=ak.num(btag)[os_selection], weight=weight.weight()[os_selection])
    output['N_central'].fill(dataset=dataset, multiplicity=ak.num(central)[os_selection], weight=weight.weight()[os_selection])
    output['N_ele'].fill(dataset=dataset, multiplicity=ak.num(electron)[os_selection], weight=weight.weight()[os_selection])
    # FIX: previously filled ak.num(electron) (copy/paste from N_ele);
    # the muon-multiplicity histogram must count muons.
    output['N_mu'].fill(dataset=dataset, multiplicity=ak.num(muon)[os_selection], weight=weight.weight()[os_selection])
    output['N_fwd'].fill(dataset=dataset, multiplicity=ak.num(fwd)[os_selection], weight=weight.weight()[os_selection])
    output['MET'].fill(dataset=dataset, pt=ev.MET[os_selection].pt, phi=ev.MET[os_selection].phi, weight=weight.weight()[os_selection])

    output['electron'].fill(dataset=dataset, pt=ak.to_numpy(ak.flatten(electron[BL].pt)), eta=ak.to_numpy(ak.flatten(electron[BL].eta)), phi=ak.to_numpy(ak.flatten(electron[BL].phi)), weight=weight.weight()[BL])
    output['muon'].fill(dataset=dataset, pt=ak.to_numpy(ak.flatten(muon[BL].pt)), eta=ak.to_numpy(ak.flatten(muon[BL].eta)), phi=ak.to_numpy(ak.flatten(muon[BL].phi)), weight=weight.weight()[BL])
    output['lead_lep'].fill(dataset=dataset, pt=ak.to_numpy(ak.flatten(leading_lepton[BL].pt)), eta=ak.to_numpy(ak.flatten(leading_lepton[BL].eta)), phi=ak.to_numpy(ak.flatten(leading_lepton[BL].phi)), weight=weight.weight()[BL])
    output['trail_lep'].fill(dataset=dataset, pt=ak.to_numpy(ak.flatten(trailing_lepton[BL].pt)), eta=ak.to_numpy(ak.flatten(trailing_lepton[BL].eta)), phi=ak.to_numpy(ak.flatten(trailing_lepton[BL].phi)), weight=weight.weight()[BL])
    output['fwd_jet'].fill(dataset=dataset, pt=ak.flatten(high_p_fwd[BL].pt_nom), eta=ak.flatten(high_p_fwd[BL].eta), phi=ak.flatten(high_p_fwd[BL].phi), weight=weight.weight()[BL])
    output['b1'].fill(dataset=dataset, pt=ak.flatten(high_score_btag[:, 0:1][BL].pt_nom), eta=ak.flatten(high_score_btag[:, 0:1][BL].eta), phi=ak.flatten(high_score_btag[:, 0:1][BL].phi), weight=weight.weight()[BL])
    output['b2'].fill(dataset=dataset, pt=ak.flatten(high_score_btag[:, 1:2][BL].pt_nom), eta=ak.flatten(high_score_btag[:, 1:2][BL].eta), phi=ak.flatten(high_score_btag[:, 1:2][BL].phi), weight=weight.weight()[BL])
    output['j1'].fill(dataset=dataset, pt=ak.flatten(jet.pt_nom[:, 0:1][BL]), eta=ak.flatten(jet.eta[:, 0:1][BL]), phi=ak.flatten(jet.phi[:, 0:1][BL]), weight=weight.weight()[BL])
    output['j2'].fill(dataset=dataset, pt=ak.flatten(jet[:, 1:2][BL].pt_nom), eta=ak.flatten(jet[:, 1:2][BL].eta), phi=ak.flatten(jet[:, 1:2][BL].phi), weight=weight.weight()[BL])
    output['j3'].fill(dataset=dataset, pt=ak.flatten(jet[:, 2:3][BL].pt_nom), eta=ak.flatten(jet[:, 2:3][BL].eta), phi=ak.flatten(jet[:, 2:3][BL].phi), weight=weight.weight()[BL])

    # Now, take care of systematic uncertainties (MC only)
    if not dataset == 'MuonEG':
        alljets = getJets(ev, minPt=0, maxEta=4.7)
        alljets = alljets[(alljets.jetId > 1)]
        for var in self.variations:
            # get the collections that change with the variations
            jet = getPtEtaPhi(alljets, pt_var=var)
            jet = jet[(jet.pt > 25)]
            jet = jet[~match(jet, muon, deltaRCut=0.4)]  # remove jets that overlap with muons
            jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

            central = jet[(abs(jet.eta) < 2.4)]
            btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
            light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
            fwd = getFwdJet(light)
            fwd_noPU = getFwdJet(light, puId=False)

            ## forward jets
            high_p_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]  # highest momentum spectator
            high_pt_fwd = fwd[ak.singletons(ak.argmax(fwd.pt, axis=1))]  # highest transverse momentum spectator
            high_eta_fwd = fwd[ak.singletons(ak.argmax(abs(fwd.eta), axis=1))]  # most forward spectator

            ## Get the two leading b-jets in terms of btag score
            # FIX: descending sort, same as in the nominal path above.
            high_score_btag = central[ak.argsort(central.btagDeepFlavB, ascending=False)][:, :2]

            # get the modified selection -> more difficult
            selection.add('N_jet>2_' + var, (ak.num(jet.pt) >= 3))  # stupid bug here...
            selection.add('N_btag=2_' + var, (ak.num(btag) == 2))
            selection.add('N_central>1_' + var, (ak.num(central) >= 2))
            selection.add('N_fwd>0_' + var, (ak.num(fwd) >= 1))
            selection.add('MET>30_' + var, (getattr(ev.MET, var) > 30))

            ## Don't change the selection for now...
            bl_reqs = os_reqs + ['N_jet>2_' + var, 'MET>30_' + var, 'N_btag=2_' + var, 'N_central>1_' + var, 'N_fwd>0_' + var]
            bl_reqs_d = {sel: True for sel in bl_reqs}
            BL = selection.require(**bl_reqs_d)

            # the OS selection remains unchanged
            output['N_jet_' + var].fill(dataset=dataset, multiplicity=ak.num(jet)[os_selection], weight=weight.weight()[os_selection])
            output['N_fwd_' + var].fill(dataset=dataset, multiplicity=ak.num(fwd)[os_selection], weight=weight.weight()[os_selection])
            output['N_b_' + var].fill(dataset=dataset, multiplicity=ak.num(btag)[os_selection], weight=weight.weight()[os_selection])
            output['N_central_' + var].fill(dataset=dataset, multiplicity=ak.num(central)[os_selection], weight=weight.weight()[os_selection])

            # We don't need to redo all plots with variations. E.g., just add uncertainties to the jet plots.
            output['j1_' + var].fill(dataset=dataset, pt=ak.flatten(jet.pt[:, 0:1][BL]), eta=ak.flatten(jet.eta[:, 0:1][BL]), phi=ak.flatten(jet.phi[:, 0:1][BL]), weight=weight.weight()[BL])
            output['b1_' + var].fill(dataset=dataset, pt=ak.flatten(high_score_btag[:, 0:1].pt[:, 0:1][BL]), eta=ak.flatten(high_score_btag[:, 0:1].eta[:, 0:1][BL]), phi=ak.flatten(high_score_btag[:, 0:1].phi[:, 0:1][BL]), weight=weight.weight()[BL])
            output['fwd_jet_' + var].fill(dataset=dataset, pt=ak.flatten(high_p_fwd[BL].pt), eta=ak.flatten(high_p_fwd[BL].eta), phi=ak.flatten(high_p_fwd[BL].phi), weight=weight.weight()[BL])
            output['MET_' + var].fill(dataset=dataset, pt=getattr(ev.MET, var)[os_selection], phi=ev.MET[os_selection].phi, weight=weight.weight()[os_selection])

    return output
def __init__(self, ev, obj, wp, verbose=0, year=2018):
    """Attach derived lepton quantities and build the working-point selection.

    Decorates the Muon or Electron collection of ``ev`` in place with
    cone pt, the matched jet's DeepJet score, a v2 relative isolation,
    and FCNC isolation/fake booleans, then computes ``self.selection``
    for the requested working point.

    Parameters
    ----------
    ev :
        NanoAOD-style events; its Muon/Electron records gain new fields.
    obj : str
        "Muon" or "Electron".
    wp : str or None
        Working-point name (a key of ``obj_def[obj]``); None disables
        the selection dictionary.
    verbose : int
        If > 0, prints which extra cuts get applied.
    year : int
        Data-taking year (2016/2017/2018); steers the isolation constants.
    """
    self.obj = obj
    self.wp = wp
    # FIX: identity check for None ('== None' is unidiomatic and can
    # misbehave for objects that override __eq__)
    if self.wp is None:
        self.selection_dict = {}
    else:
        self.selection_dict = obj_def[self.obj][self.wp]
    self.v = verbose
    #self.year = df['year'][0] ## to be implemented in next version of babies
    self.year = year

    if self.obj == "Muon":
        # collections are already there, so we just need to calculate missing ones
        ev['Muon', 'absMiniIso'] = ev.Muon.miniPFRelIso_all * ev.Muon.pt
        ev['Muon', 'ptErrRel'] = ev.Muon.ptErr / ev.Muon.pt

        # this is what we are using:
        # - jetRelIso if the matched jet is within deltaR<0.4, pfRelIso03_all otherwise
        # - btagDeepFlavB discriminator of the matched jet if jet is within deltaR<0.4, 0 otherwise
        # (FOR TTH) - pt_cone = 0.9*pt of matched jet if jet is within deltaR<0.4, pt/(pt+iso) otherwise
        # (FOR SS) - pt_cone = pt*(1 + max(0,I_m-I_1)) if pt_rel > I_3; max(pt, pt(matched_jet)*I_2) otherwise

        #TTH conePt
        mask_close = (ak.fill_none(ev.Muon.delta_r(ev.Muon.matched_jet), 99) < 0.4) * 1
        mask_far = ~(ak.fill_none(ev.Muon.delta_r(ev.Muon.matched_jet), 99) < 0.4) * 1
        #conePt = 0.9 * ak.fill_none(ev.Muon.matched_jet.pt,0) * mask_close + ev.Muon.pt*(1 + ev.Muon.miniPFRelIso_all)*mask_far

        #SS conePt
        if (self.year == 2017) or (self.year == 2018):
            I_1 = 0.11
            I_2 = 0.74
            I_3 = 6.8
        elif (self.year == 2016):
            I_1 = 0.16
            I_2 = 0.76
            I_3 = 7.2
        else:
            # FIX: previously fell through with I_1/I_2/I_3 undefined,
            # producing an opaque NameError further down.
            raise ValueError("Unsupported year for muon isolation constants: %s" % self.year)

        PF_unflatten = ak.from_regular(ev.Muon.miniPFRelIso_all[:, :, np.newaxis])
        # equivalent to max(0, ev.Muon.miniPFRelIso_all - I_1)
        max_miniIso = ak.max(ak.concatenate([PF_unflatten - I_1, ak.zeros_like(PF_unflatten)], axis=2), axis=2)
        muon_pt_unflatten = ak.from_regular(ev.Muon.pt[:, :, np.newaxis])
        jet_pt_unflatten = ak.from_regular(ev.Muon.matched_jet.pt[:, :, np.newaxis])
        # max(ev.Muon.pt, ev.Muon.matched_jet.pt * I_2)
        max_pt = ak.max(ak.concatenate([muon_pt_unflatten, jet_pt_unflatten * I_2], axis=2), axis=2)
        conePt = (ev.Muon.pt * (1 + max_miniIso)) * (ev.Muon.jetPtRelv2 > I_3) + (max_pt * ~(ev.Muon.jetPtRelv2 > I_3))
        deepJet = ak.fill_none(ev.Muon.matched_jet.btagDeepFlavB, 0) * mask_close
        jetRelIsoV2 = ev.Muon.jetRelIso * mask_close + ev.Muon.pfRelIso03_all * mask_far  # default to 0 if no match

        ev['Muon', 'deepJet'] = ak.copy(deepJet)
        ev['Muon', 'jetRelIsoV2'] = jetRelIsoV2
        ev['Muon', 'conePt'] = conePt
        ev['Muon', 'jetRelIso'] = ev.Muon.jetRelIso
        ev['Muon', 'jetPtRelv2'] = ev.Muon.jetPtRelv2
        ev['Muon', 'boolFCNCIso'] = self.getFCNCIsolation(ev.Muon.jetRelIso, ev.Muon.jetPtRelv2, I_2, I_3) & (ev.Muon.miniPFRelIso_all < I_1)
        ev['Muon', 'boolFCNCfake'] = (ev.Muon.genPartFlav != 1) & (ev.Muon.genPartFlav != 15)

        self.cand = ev.Muon

    elif self.obj == "Electron":
        # calculate new variables. assignment is awkward, but what can you do.
        ev['Electron', 'absMiniIso'] = ev.Electron.miniPFRelIso_all * ev.Electron.pt
        ev['Electron', 'etaSC'] = ev.Electron.eta + ev.Electron.deltaEtaSC

        # the following line is only needed if we do our own matching.
        # right now, we keep using the NanoAOD match, but check the deltaR distance
        # jet_index, mask_match, mask_nomatch = self.matchJets(ev.Electron, ev.Jet)

        # this is what we are using:
        # - jetRelIso if the matched jet is within deltaR<0.4, pfRelIso03_all otherwise
        # - btagDeepFlavB discriminator of the matched jet if jet is within deltaR<0.4, 0 otherwise
        # - pt_cone = 0.9*pt of matched jet if jet is within deltaR<0.4, pt/(pt+iso), 0 otherwise
        mask_close = (ak.fill_none(ev.Electron.delta_r(ev.Electron.matched_jet), 99) < 0.4) * 1
        mask_far = ~(ak.fill_none(ev.Electron.delta_r(ev.Electron.matched_jet), 99) < 0.4) * 1
        deepJet = ak.fill_none(ev.Electron.matched_jet.btagDeepFlavB, 0) * mask_close
        jetRelIsoV2 = ev.Electron.jetRelIso * mask_close + ev.Electron.pfRelIso03_all * mask_far  # default to 0 if no match

        #TTH conePt
        #conePt = 0.9 * ak.fill_none(ev.Electron.matched_jet.pt,0) * mask_close + ev.Electron.pt*(1 + ev.Electron.miniPFRelIso_all)*mask_far

        #SS conePt
        if (self.year == 2017) or (self.year == 2018):
            I_1 = 0.07
            I_2 = 0.78
            I_3 = 8.0
        elif (self.year == 2016):
            I_1 = 0.12
            I_2 = 0.8
            I_3 = 7.2
        else:
            # FIX: same undefined-constants fall-through as for muons.
            raise ValueError("Unsupported year for electron isolation constants: %s" % self.year)

        PF_unflatten = ak.from_regular(ev.Electron.miniPFRelIso_all[:, :, np.newaxis])
        # equivalent to max(0, ev.Electron.miniPFRelIso_all - I_1)
        max_miniIso = ak.max(ak.concatenate([PF_unflatten - I_1, ak.zeros_like(PF_unflatten)], axis=2), axis=2)
        electron_pt_unflatten = ak.from_regular(ev.Electron.pt[:, :, np.newaxis])
        jet_pt_unflatten = ak.from_regular(ev.Electron.matched_jet.pt[:, :, np.newaxis])
        # max(ev.Electron.pt, ev.Electron.matched_jet.pt * I_2)
        max_pt = ak.max(ak.concatenate([electron_pt_unflatten, jet_pt_unflatten * I_2], axis=2), axis=2)
        conePt = (ev.Electron.pt * (1 + max_miniIso)) * (ev.Electron.jetPtRelv2 > I_3) + (max_pt * ~(ev.Electron.jetPtRelv2 > I_3))

        ev['Electron', 'deepJet'] = ak.copy(deepJet)
        ev['Electron', 'jetRelIsoV2'] = jetRelIsoV2
        ev['Electron', 'conePt'] = conePt
        ev['Electron', 'jetRelIso'] = ev.Electron.jetRelIso
        ev['Electron', 'jetPtRelv2'] = ev.Electron.jetPtRelv2
        ev['Electron', 'boolFCNCIso'] = self.getFCNCIsolation(ev.Electron.jetRelIso, ev.Electron.jetPtRelv2, I_2, I_3) & (ev.Electron.miniPFRelIso_all < I_1)
        ev['Electron', 'boolFCNCfake'] = (ev.Electron.genPartFlav != 1) & (ev.Electron.genPartFlav != 15)

        self.cand = ev.Electron

    self.getSelection()

    if self.obj == "Electron" and self.wp == "tight":
        self.selection = self.selection & self.getElectronMVAID() & self.getIsolation(0.07, 0.78, 8.0) & self.isTriggerSafeNoIso()
        if self.v > 0: print(" - custom ID and multi-isolation")

    if self.obj == "Muon" and self.wp == "tight":
        self.selection = self.selection & self.getIsolation(0.11, 0.74, 6.8)
        if self.v > 0: print(" - custom multi-isolation")
        #self.selection = self.selection & ak.fill_none(ev.Muon.matched_jet.btagDeepFlavB<0.2770, True)
        #self.selection = self.selection & (ev.Muon.matched_jet.btagDeepFlavB<0.2770)
        #if self.v>0: print (" - deepJet")

    if self.obj == "Electron" and (self.wp == "tightTTH" or self.wp == 'fakeableTTH' or self.wp == "tightSSTTH" or self.wp == 'fakeableSSTTH'):
        self.selection = self.selection & self.getSigmaIEtaIEta()
        if self.v > 0: print(" - SigmaIEtaIEta")
        #self.selection = self.selection & ak.fill_none(ev.Electron.matched_jet.btagDeepFlavB<0.2770, True)
        #self.selection = self.selection & (ev.Electron.matched_jet.btagDeepFlavB<0.2770)
        #self.selection = self.selection & (ev.Jet[ev.Electron.jetIdx].btagDeepFlavB<0.2770)
        #if self.v>0: print (" - deepJet")

    if self.obj == 'Muon' and (self.wp == 'fakeableTTH' or self.wp == 'fakeableSSTTH'):
        self.selection = self.selection & (self.cand.deepJet < self.getThreshold(self.cand.conePt, min_pt=20, max_pt=45, low=0.2770, high=0.0494))
        if self.v > 0: print(" - interpolated deepJet")
def process(self, events):
    """Boosted H(bb) + W(jj) + lepton selection; fills the per-channel cutflow.

    Builds e/mu trigger masks, MET filters, good lepton/jet/fat-jet
    collections, selects the Hbb and Wjj fat-jet candidates, and fills
    ``output['cutflow']`` cumulatively for each channel's cut list.

    Parameters
    ----------
    events : coffea NanoEvents array.

    Returns
    -------
    The accumulator with 'sumw' (MC) and 'cutflow' filled.
    """
    # get meta infos
    dataset = events.metadata["dataset"]
    isRealData = not hasattr(events, "genWeight")
    n_events = len(events)
    selection = processor.PackedSelection()
    weights = processor.Weights(n_events)
    output = self.accumulator.identity()

    # sum of generator weights (MC only)
    if not isRealData:
        output['sumw'][dataset] += awkward1.sum(events.genWeight)

    # trigger: OR of all configured HLT paths per channel
    triggers = {}
    for channel in ["e", "mu"]:
        trigger = np.zeros(len(events), dtype='bool')
        for t in self._trigger[channel]:
            # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
            # only catch failures of the HLT branch lookup itself.
            try:
                trigger = trigger | events.HLT[t]
            except Exception:
                warnings.warn("Missing trigger %s" % t, RuntimeWarning)
        triggers[channel] = trigger

    # met filter
    met_filters = ["goodVertices",
                   "globalSuperTightHalo2016Filter",
                   "HBHENoiseFilter",
                   "HBHENoiseIsoFilter",
                   "EcalDeadCellTriggerPrimitiveFilter",
                   "BadPFMuonFilter",
                   ]
    met_filters_mask = np.ones(len(events), dtype='bool')
    for t in met_filters:
        met_filters_mask = met_filters_mask & events.Flag[t]
    selection.add("met_filter", awkward1.to_numpy(met_filters_mask))

    # load objects
    muons = events.Muon
    electrons = events.Electron
    jets = events.Jet
    fatjets = events.FatJet
    subjets = events.SubJet
    fatjetsLS = events.FatJetLS
    met = events.MET

    # muons
    goodmuon = (
        (muons.mediumId)
        & (muons.miniPFRelIso_all <= 0.2)
        & (muons.pt >= 27)
        & (abs(muons.eta) <= 2.4)
        & (abs(muons.dz) < 0.1)
        & (abs(muons.dxy) < 0.05)
        & (muons.sip3d < 4)
    )
    good_muons = muons[goodmuon]
    ngood_muons = awkward1.sum(goodmuon, axis=1)

    # electrons
    goodelectron = (
        (electrons.mvaFall17V2noIso_WP90)
        & (electrons.pt >= 30)
        & (abs(electrons.eta) <= 1.479)
        & (abs(electrons.dz) < 0.1)
        & (abs(electrons.dxy) < 0.05)
        & (electrons.sip3d < 4)
    )
    good_electrons = electrons[goodelectron]
    ngood_electrons = awkward1.sum(goodelectron, axis=1)

    # good leptons
    good_leptons = awkward1.concatenate([good_muons, good_electrons], axis=1)
    # FIX: sort descending so firsts() below picks the *leading* lepton;
    # the previous ascending sort made the candidate the softest lepton.
    good_leptons = good_leptons[awkward1.argsort(good_leptons.pt, ascending=False)]

    # lepton candidate
    candidatelep = awkward1.firsts(good_leptons)

    # lepton channel selection
    # not sure if need to require 0 muons or 0 electrons in the next line
    selection.add("ch_e", awkward1.to_numpy((triggers["e"]) & (ngood_electrons == 1) & (ngood_muons == 0)))
    selection.add("ch_mu", awkward1.to_numpy((triggers["mu"]) & (ngood_electrons == 0) & (ngood_muons == 1)))

    # jets
    ht = awkward1.sum(jets[jets.pt > 30].pt, axis=1)
    selection.add("ht_400", awkward1.to_numpy(ht >= 400))
    goodjet = (
        (jets.isTight)
        & (jets.pt > 30)
        & (abs(jets.eta) <= 2.5)
    )
    good_jets = jets[goodjet]

    # fat jets
    jID = "isTight"
    # TODO: add mass correction
    # a way to get the first two subjets
    # cart = awkward1.cartesian([fatjets, subjets], nested=True)
    # idxes = awkward1.pad_none(awkward1.argsort(cart['0'].delta_r(cart['1'])), 2, axis=2)
    # sj1 = subjets[idxes[:,:,0]]
    # sj2 = subjets[idxes[:,:,1]]
    good_fatjet = (
        (getattr(fatjets, jID))
        & (abs(fatjets.eta) <= 2.4)
        & (fatjets.pt > 50)
        & (fatjets.msoftdrop > 30)
        & (fatjets.msoftdrop < 210)
        #& (fatjets.pt.copy(content=fatjets.subjets.content.counts) == 2) # TODO: require 2 subjets?
        # this can probably be done w FatJet_subJetIdx1 or FatJet_subJetIdx2
        & (awkward1.all(fatjets.subjets.pt >= 20))
        & (awkward1.all(abs(fatjets.subjets.eta) <= 2.4))
    )
    good_fatjets = fatjets[good_fatjet]

    # hbb candidate: leading good fat jet well separated from the lepton
    mask_hbb = (
        (good_fatjets.pt > 200)
        & (good_fatjets.delta_r(candidatelep) > 2.0)
    )
    candidateHbb = awkward1.firsts(good_fatjets[mask_hbb])

    # b-tag
    #& (good_fatjets.particleNetMD_Xbb > 0.9)
    # score would be larger for tight category (0.97)
    selection.add('hbb_btag', awkward1.to_numpy(candidateHbb.deepTagMD_ZHbbvsQCD >= 0.8))

    # No AK4 b-tagged jets away from bb jet
    # FIX: the delta-R mask is built from good_jets, so it must index
    # good_jets; indexing `jets` with a mask of a different jagged shape
    # is invalid.
    jets_HbbV = good_jets[good_jets.delta_r(candidateHbb) >= 1.2]
    # NOTE(review): the cut keeps events whose max away-side btag is *above*
    # the WP, although the name says veto -- looks inverted; confirm.
    selection.add('hbb_vetobtagaway', awkward1.to_numpy(awkward1.max(jets_HbbV.btagDeepB, axis=1, mask_identity=False) > BTagEfficiency.btagWPs[self._year]['medium']))

    # fat jets Lepton Subtracted
    # wjj candidate: closest lepton-subtracted fat jet to the lepton
    mask_wjj = (
        (fatjetsLS.pt > 50)
        & (fatjetsLS.delta_r(candidatelep) > 1.2)
        # need to add 2 subjets w pt > 20 & eta<2.4
        # need to add ID?
    )
    candidateWjj = awkward1.firsts(fatjetsLS[mask_wjj][awkward1.argmin(fatjetsLS[mask_wjj].delta_r(candidatelep), axis=1, keepdims=True)])
    # add t2/t1 <= 0.75 (0.45 HP)
    selection.add('hww_mass', awkward1.to_numpy(candidateWjj.mass >= 10))

    # wjjlnu info
    #HSolverLiInfo hwwInfoLi;
    # qqSDmass = candidateWjj.msoftdrop
    # hwwLi = hSolverLi->minimize(candidatelep.p4(), met.p4(), wjjcand.p4(), qqSDmass, hwwInfoLi)
    #neutrino = hwwInfoLi.neutrino;
    #wlnu = hwwInfoLi.wlnu;
    #wqq = hwwInfoLi.wqqjet;
    #hWW = hwwInfoLi.hWW;
    #wwDM = PhysicsUtilities::deltaR( wlnu,wqq) * hWW.pt()/2.0;
    # add dlvqq <= 11 (2.5 HP)

    # in the meantime let's add the mass
    '''
    mm = (candidatejet - candidatelep).mass2
    jmass = (mm>0)*np.sqrt(np.maximum(0, mm)) + (mm<0)*candidatejet.mass
    joffshell = jmass < 62.5
    massassumption = 80.*joffshell + (125 - 80.)*~joffshell
    x = massassumption**2/(2*candidatelep.pt*met.pt) + np.cos(candidatelep.phi - met.phi)
    met_eta = (
        (x < 1)*np.arcsinh(x*np.sinh(candidatelep.eta))
        + (x > 1)*(
            candidatelep.eta - np.sign(candidatelep.eta)*np.arccosh(candidatelep.eta)
            )
        )
    met_p4 = TLorentzVectorArray.from_ptetaphim(np.array([0.]),np.array([0.]),np.array([0.]),np.array([0.]))
    if met.size > 0:
        met_p4 = TLorentzVectorArray.from_ptetaphim(met.pt, met_eta.fillna(0.), met.phi, np.zeros(met.size))

    # hh system
    candidateHH = candidateWjj + met_p4 + candidateHbb
    selection.add('hh_mass', candidateHH.mass >= 700)
    selection.add('hh_centrality', candidateHH.pt/candidateHH.mass >= 0.3)
    '''

    channels = {"e": ["met_filter", "ch_e", "ht_400", "hbb_btag", "hbb_vetobtagaway", "hww_mass"],  #,"hh_mass","hh_centrality"],
                "mu": ["met_filter", "ch_mu", "ht_400", "hbb_btag", "hbb_vetobtagaway", "hww_mass"]  #,"hh_mass","hh_centrality"],
                }

    # need to add gen info
    if not isRealData:
        weights.add('genweight', events.genWeight)
        add_pileup_weight(weights, events.Pileup.nPU, self._year, dataset)

    for channel, cuts in channels.items():
        allcuts = set()
        # bin 0 of the cutflow: no selection applied
        output['cutflow'].fill(dataset=dataset, channel=channel, cut=0, weight=weights.weight())
        for i, cut in enumerate(cuts):
            allcuts.add(cut)
            # renamed from `cut` to avoid shadowing the loop variable
            cutmask = selection.all(*allcuts)
            output['cutflow'].fill(dataset=dataset, channel=channel, cut=i + 1, weight=weights.weight()[cutmask])

    return output
def process(self, events):
    """Trijet resonance cut-efficiency scan.

    Applies year/dataset-dependent HLT masks and a >=3 good-jet
    requirement, computes kinematic variables of the three leading
    selected jets, then scans one cut at a time (min jet pt, max |eta|,
    max pairwise dEta, min pairwise dR) and accumulates the surviving
    event counts per dataset.

    Parameters
    ----------
    events : coffea NanoEvents array.

    Returns
    -------
    The accumulator with 'total_events' and the per-cut counters filled.
    """
    output = self._accumulator.identity()
    dataset_name = events.metadata['dataset']
    output["total_events"][dataset_name] += len(events)

    # HLT selection
    # NOTE(review): `year` is read from an enclosing/global scope, not from
    # self -- confirm it is always defined when this processor runs.
    HLT_mask = []
    if year == "2016":
        if "SingleMuon" in dataset_name:  # this does not work, as the name of file which is under processing is unknown
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50 | events.HLT.TkMu50
        else:
            #https://twiki.cern.ch/twiki/bin/view/CMS/HLTPathsRunIIList
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            elif "2016H" in dataset_name:
                HLT_mask = events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            else:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
    if year == "2017":
        if "SingleMuon" in dataset_name:
            if "2017B" in dataset_name:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
    if year == "2018":
        if "SingleMuon" in dataset_name:
            HLT_mask = events.HLT.IsoMu24 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500

    # Require 3 jets
    jet_mask = (events.Jet.pt > 30.) & (abs(events.Jet.eta) < 2.5) & (events.Jet.isTight)
    event_mask = (awk.sum(jet_mask, axis=1) >= 3)
    event_mask = event_mask & HLT_mask
    events_3j = events[event_mask]

    # Reduce jet mask to only events with 3 good jets
    jet_mask = jet_mask[event_mask]

    # Array of the jets to consider for trijet resonance
    selected_jets = events_3j.Jet[jet_mask][:, :3]

    # Pairs of jets
    pairs = [(0, 1), (1, 2), (2, 0)]
    jet_i, jet_j = zip(*pairs)  # Returns (0, 1, 2), (1, 2, 0)
    m_ij = (selected_jets[:, jet_i] + selected_jets[:, jet_j]).mass
    dR_ij = selected_jets[:, jet_i].delta_r(selected_jets[:, jet_j])
    dEta_ij = abs(selected_jets[:, jet_i].eta - selected_jets[:, jet_j].eta)
    max_dR = awk.max(dR_ij, axis=1)
    max_dEta = awk.max(dEta_ij, axis=1)
    min_dR = awk.min(dR_ij, axis=1)
    min_dEta = awk.min(dEta_ij, axis=1)
    min_pT = awk.min(selected_jets.pt, axis=1)
    # FIX: was abs(awk.max(eta, ...)), which ignores jets at large
    # *negative* eta (e.g. eta = [-3.0, 1.0] gave 1.0 instead of 3.0);
    # take |eta| per jet first, then the maximum.
    max_eta = awk.max(abs(selected_jets.eta), axis=1)

    # jet vs. opposite-pair kinematics
    jet_k = [2, 0, 1]
    dR_i_jk = selected_jets[:, jet_i].delta_r(selected_jets[:, jet_j] + selected_jets[:, jet_k])
    dEta_i_jk = abs(selected_jets[:, jet_i].eta - (selected_jets[:, jet_j] + selected_jets[:, jet_k]).eta)
    # NOTE(review): plain |phi1 - phi2| is not wrapped into [0, pi];
    # use delta_phi if this variable is ever cut on.
    dPhi_i_jk = abs(selected_jets[:, jet_i].phi - (selected_jets[:, jet_j] + selected_jets[:, jet_k]).phi)
    dPt_i_jk = abs(selected_jets[:, jet_i].pt - (selected_jets[:, jet_j] + selected_jets[:, jet_k]).pt)
    max_dPhi_jjj = awk.max(dPhi_i_jk, axis=1)

    # trijet-mass-normalized quantities
    m3j = selected_jets.sum().mass
    pt_i_overM = selected_jets.pt / m3j
    max_pt_overM = awk.max(pt_i_overM, axis=1)
    min_pt_overM = awk.min(pt_i_overM, axis=1)
    m_01_overM = m_ij[:, 0] / m3j
    m_12_overM = m_ij[:, 1] / m3j
    m_20_overM = m_ij[:, 2] / m3j

    # Cut scans. Histogram keys are unchanged; the trailing `.format(...)`
    # calls on already-formatted f-strings were no-ops and are removed,
    # along with the unused `cut_name` locals.
    for pt_cut in range(30, 1150, 5):
        selection = PackedSelection()
        selection.add("MinJetPt_cut", min_pT > pt_cut)
        sel_mask = selection.require(**{name: True for name in selection.names})
        output[f"N_min_pT_cut{pt_cut}"][dataset_name] += len(events_3j[sel_mask])

    for eta_cut in np.arange(0, 2.5, 0.05):
        selection = PackedSelection()
        selection.add("MaxJetEta_cut", max_eta < eta_cut)
        sel_mask = selection.require(**{name: True for name in selection.names})
        output[f"N_max_eta_cut{eta_cut}"][dataset_name] += len(events_3j[sel_mask])

    for dEta_max_cut in np.arange(0, 5, 0.1):
        selection = PackedSelection()
        selection.add("MaxJJdEta_cut", max_dEta < dEta_max_cut)
        sel_mask = selection.require(**{name: True for name in selection.names})
        output[f"N_dEta_jj_max_cut{dEta_max_cut}"][dataset_name] += len(events_3j[sel_mask])

    for dR_min_cut in np.arange(0, 5, 0.1):
        selection = PackedSelection()
        selection.add("MinJJdR_cut", min_dR > dR_min_cut)
        sel_mask = selection.require(**{name: True for name in selection.names})
        output[f"N_dR_jj_min_cut{dR_min_cut}"][dataset_name] += len(events_3j[sel_mask])

    #min cut for the variable dPhi_jjj_max
    # for dPhi_jjj_max_min_cut in range(0,6,0.1):
    #     cut_name = f"dPhi_jjj_max_min_cut{dPhi_jjj_max_min_cut}".format(dPhi_jjj_max_min_cut)
    #     selections[cut_name] = PackedSelection()
    #     selections[cut_name].add("j_jj_dPhi_max_cut", min_dR > dPhi_jjj_max_min_cut)
    #     selection_items[cut_name] = []
    #     selection_items[cut_name].append("j_jj_dPhi_max_cut")
    #     sel_mask = HLT_mask & selections[cut_name].require(**{name: True for name in selection_items[cut_name]})
    #     output[f"N_dPhi_jjj_max_min_cut{dPhi_jjj_max_min_cut}".format(dPhi_jjj_max_min_cut)][dataset_name] += events_3j[sel_mask].__len__()

    # for dPhi_jjj_min_max_cut in range(0,6,0.1):
    #     cut_name = f"dPhi_jjj_max_min_cut{dPhi_jjj_max_min_cut}".format(dPhi_jjj_max_min_cut)
    #     selections[cut_name] = PackedSelection()
    #     selections[cut_name].add("j_jj_dPhi_max_cut", min_dR > dPhi_jjj_max_min_cut)
    #     selection_items[cut_name] = []
    #     selection_items[cut_name].append("j_jj_dPhi_max_cut")
    #     sel_mask = HLT_mask & selections[cut_name].require(**{name: True for name in selection_items[cut_name]})
    #     output[f"N_dPhi_jjj_max_min_cut{dPhi_jjj_max_min_cut}".format(dPhi_jjj_max_min_cut)][dataset_name] += events_3j[sel_mask].__len__()

    return output
def process(self, events):
    """Select trijet events and fill kinematic histograms.

    For each event: apply a year-dependent HLT path OR, require >= 3 jets
    passing pt/eta/ID cuts, build all pairwise and jet-vs-dijet kinematic
    variables from the three leading selected jets, and fill the accumulator
    histograms for every entry in ``selection_masks``.

    Parameters
    ----------
    events : NanoEvents-like awkward array with ``Jet`` and ``HLT``
        collections and ``metadata['dataset']``.

    Returns
    -------
    The filled accumulator (histograms + event counters).
    """
    output = self._accumulator.identity()
    dataset_name = events.metadata['dataset']
    output["total_events"][dataset_name] += events.__len__()

    # HLT selection
    # NOTE: ``year`` is read from module/global scope, not from ``self``.
    HLT_mask = []
    if year == "2016":
        if "SingleMuon" in dataset_name:
            #this does not work, as the name of file which is under processing is unknown
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50 | events.HLT.TkMu50
        else:
            #https://twiki.cern.ch/twiki/bin/view/CMS/HLTPathsRunIIList
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            elif "2016H" in dataset_name:
                HLT_mask = events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            else:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
    if year == "2017":
        if "SingleMuon" in dataset_name:
            if "2017B" in dataset_name:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
    if year == "2018":
        if "SingleMuon" in dataset_name:
            HLT_mask = events.HLT.IsoMu24 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500

    # Require 3 jets
    jet_mask = (events.Jet.pt > 30.) & (abs(events.Jet.eta) < 2.5) & (events.Jet.isTight)
    event_mask = (awk.sum(jet_mask, axis=1) >= 3)
    # NOTE(review): if ``year`` matches none of 2016/17/18, HLT_mask stays []
    # and this `&` will fail — confirm ``year`` is always one of those.
    event_mask = event_mask & HLT_mask
    events_3j = events[event_mask]
    # Reduce jet mask to only events with 3 good jets
    jet_mask = jet_mask[event_mask]
    # Array of the jets to consider for trijet resonance
    selected_jets = events_3j.Jet[jet_mask][:, :3]

    # Pairs of jets
    #pairs = awk.argcombinations(selected_jets, 2)
    #jet_i, jet_j = awk.unzip(pairs)
    pairs = [(0, 1), (1, 2), (2, 0)]
    jet_i, jet_j = zip(*pairs)  # Returns [0, 1, 2] , [1, 2, 0]
    # Dijet quantities for each of the three pairs (01, 12, 20).
    m_ij = (selected_jets[:, jet_i] + selected_jets[:, jet_j]).mass
    dR_ij = selected_jets[:, jet_i].delta_r(selected_jets[:, jet_j])
    dEta_ij = abs(selected_jets[:, jet_i].eta - selected_jets[:, jet_j].eta)
    # Jet-vs-dijet quantities: jet i against the (j+k) system.
    jet_k = [2, 0, 1]
    dR_i_jk = selected_jets[:, jet_i].delta_r(selected_jets[:, jet_j] + selected_jets[:, jet_k])
    dEta_i_jk = abs(selected_jets[:, jet_i].eta - (selected_jets[:, jet_j] + selected_jets[:, jet_k]).eta)
    # NOTE(review): plain |phi_i - phi_jk| is not wrapped into [0, pi] — confirm intended.
    dPhi_i_jk = abs(selected_jets[:, jet_i].phi - (selected_jets[:, jet_j] + selected_jets[:, jet_k]).phi)
    # Trijet invariant mass and mass-normalized variables.
    m3j = selected_jets.sum().mass
    pt_i_overM = selected_jets.pt / m3j
    m_01_overM = m_ij[:, 0] / m3j
    m_12_overM = m_ij[:, 1] / m3j
    m_20_overM = m_ij[:, 2] / m3j
    dPtoverM_0_12 = abs(selected_jets[:, 0].pt - (selected_jets[:, 1] + selected_jets[:, 2]).pt) / m3j
    dPtoverM_1_20 = abs(selected_jets[:, 1].pt - (selected_jets[:, 2] + selected_jets[:, 0]).pt) / m3j
    dPtoverM_2_01 = abs(selected_jets[:, 2].pt - (selected_jets[:, 0] + selected_jets[:, 1]).pt) / m3j

    # Event selection masks
    selection_masks = {}
    # Pre-selection
    selection = PackedSelection()
    selection.add("Dummy", m3j > 000)  # placeholder cut (m3j > 0): keeps all trijet events
    sel_mask = selection.require(**{name: True for name in selection.names})
    selection_masks["Pre-selection"] = sel_mask
    # HLT_trigger (this is already done at the beginning)
    # if year == "2016":
    #     JetHLT_mask = []
    #     if "2016B2" in dataset_name:
    #         JetHLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
    #     elif "2016H" in dataset_name:
    #         JetHLT_mask = events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
    #     else:
    #         JetHLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
    #     selection_masks["JetHLT"] = JetHLT_mask[event_mask]
    # if year == "2017":
    #     JetHLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
    #     selection_masks["JetHLT"] = JetHLT_mask[event_mask]
    # if year == "2018":
    #     JetHLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
    #     selection_masks["JetHLT"] = JetHLT_mask[event_mask]

    # Fill histograms
    # NOTE: the loop variable ``selection`` (the dict key, a string) shadows
    # the PackedSelection object created above.
    for selection, selection_mask in selection_masks.items():
        output["mjjj"].fill(dataset=dataset_name, selection=selection, mjjj=m3j[selection_mask])
        output["m_ij"].fill(dataset=dataset_name, selection=selection, m_01=m_ij[:, 0][selection_mask], m_12=m_ij[:, 1][selection_mask], m_20=m_ij[:, 2][selection_mask])
        output["dR_ij"].fill(dataset=dataset_name, selection=selection, dR_01=dR_ij[:, 0][selection_mask], dR_12=dR_ij[:, 1][selection_mask], dR_20=dR_ij[:, 2][selection_mask])
        output["dEta_ij"].fill(dataset=dataset_name, selection=selection, dEta_01=dEta_ij[:, 0][selection_mask], dEta_12=dEta_ij[:, 1][selection_mask], dEta_20=dEta_ij[:, 2][selection_mask])
        output["moverM_ij"].fill(dataset=dataset_name, selection=selection, moverM_01=m_01_overM[selection_mask], moverM_12=m_12_overM[selection_mask], moverM_20=m_20_overM[selection_mask])
        output["pt_i"].fill(dataset=dataset_name, selection=selection, pt_0=selected_jets[:, 0][selection_mask].pt, pt_1=selected_jets[:, 1][selection_mask].pt, pt_2=selected_jets[:, 2][selection_mask].pt)
        output["eta_i"].fill(dataset=dataset_name, selection=selection, eta_0=selected_jets[:, 0][selection_mask].eta, eta_1=selected_jets[:, 1][selection_mask].eta, eta_2=selected_jets[:, 2][selection_mask].eta)
        output["ptoverM_i"].fill(dataset=dataset_name, selection=selection, ptoverM_0=pt_i_overM[:, 0][selection_mask], ptoverM_1=pt_i_overM[:, 1][selection_mask], ptoverM_2=pt_i_overM[:, 2][selection_mask])
        output["dR_i_jk"].fill(dataset=dataset_name, selection=selection, dR_0_12=dR_i_jk[:, 0][selection_mask], dR_1_20=dR_i_jk[:, 1][selection_mask], dR_2_01=dR_i_jk[:, 2][selection_mask])
        output["dEta_i_jk"].fill(dataset=dataset_name, selection=selection, dEta_0_12=dEta_i_jk[:, 0][selection_mask], dEta_1_20=dEta_i_jk[:, 1][selection_mask], dEta_2_01=dEta_i_jk[:, 2][selection_mask])
        output["dPhi_i_jk"].fill(dataset=dataset_name, selection=selection, dPhi_0_12=dPhi_i_jk[:, 0][selection_mask], dPhi_1_20=dPhi_i_jk[:, 1][selection_mask], dPhi_2_01=dPhi_i_jk[:, 2][selection_mask])
        output["dPtoverM_i_jk"].fill(
            dataset=dataset_name, selection=selection, dPtoverM_0_12=dPtoverM_0_12[selection_mask], dPtoverM_1_20=dPtoverM_1_20[selection_mask], dPtoverM_2_01=dPtoverM_2_01[selection_mask])

        # Per-selection slices used for the max/min summary histograms below.
        pt_i_overM_2fill = pt_i_overM[selection_mask]
        dR_ij_2fill = dR_ij[selection_mask]
        dEta_ij_2fill = dEta_ij[selection_mask]
        dR_i_jk_2fill = dR_i_jk[selection_mask]
        dEta_i_jk_2fill = dEta_i_jk[selection_mask]
        dPhi_i_jk_2fill = dPhi_i_jk[selection_mask]
        dPtoverM_0_12_2fill = dPtoverM_0_12[selection_mask]
        dPtoverM_1_20_2fill = dPtoverM_1_20[selection_mask]
        dPtoverM_2_01_2fill = dPtoverM_2_01[selection_mask]
        selected_jets_2fill = selected_jets[selection_mask]
        # Per-event extrema over the three jets / three pairings.
        max_pt_overM_2fill = awk.max(pt_i_overM_2fill, axis=1)
        min_pt_overM_2fill = awk.min(pt_i_overM_2fill, axis=1)
        max_dR_2fill = awk.max(dR_ij_2fill, axis=1)
        max_dEta_2fill = awk.max(dEta_ij_2fill, axis=1)
        min_dR_2fill = awk.min(dR_ij_2fill, axis=1)
        min_dEta_2fill = awk.min(dEta_ij_2fill, axis=1)
        min_pt_2fill = awk.min(selected_jets_2fill.pt, axis=1)
        max_eta_2fill = awk.max(abs(selected_jets_2fill.eta), axis=1)
        max_dR_i_jk_2fill = awk.max(dR_i_jk_2fill, axis=1)
        min_dR_i_jk_2fill = awk.min(dR_i_jk_2fill, axis=1)
        max_dEta_i_jk_2fill = awk.max(dEta_i_jk_2fill, axis=1)
        min_dEta_i_jk_2fill = awk.min(dEta_i_jk_2fill, axis=1)
        max_dPhi_i_jk_2fill = awk.max(dPhi_i_jk_2fill, axis=1)
        min_dPhi_i_jk_2fill = awk.min(dPhi_i_jk_2fill, axis=1)
        # dPtoverM extrema are taken with a plain Python loop over the three
        # flat (per-event scalar) arrays.
        max_dPtoverM_i_jk_2fill = []
        min_dPtoverM_i_jk_2fill = []
        for pair in zip(dPtoverM_0_12_2fill, dPtoverM_1_20_2fill, dPtoverM_2_01_2fill):
            max_dPtoverM_i_jk_2fill.append(max(pair))
            min_dPtoverM_i_jk_2fill.append(min(pair))
        max_dPtoverM_i_jk_2fill = np.array(max_dPtoverM_i_jk_2fill)
        min_dPtoverM_i_jk_2fill = np.array(min_dPtoverM_i_jk_2fill)
        # Replace missing values (events with no entries) by the sentinel -99.
        max_pt_overM_2fill = awk.fill_none(max_pt_overM_2fill, -99)
        min_pt_overM_2fill = awk.fill_none(min_pt_overM_2fill, -99)
        max_dR_2fill = awk.fill_none(max_dR_2fill, -99)
        max_dEta_2fill = awk.fill_none(max_dEta_2fill, -99)
        min_dR_2fill = awk.fill_none(min_dR_2fill, -99)
        min_dEta_2fill = awk.fill_none(min_dEta_2fill, -99)
        min_pt_2fill = awk.fill_none(min_pt_2fill, -99)
        max_eta_2fill = awk.fill_none(max_eta_2fill, -99)
        max_dR_i_jk_2fill = awk.fill_none(max_dR_i_jk_2fill, -99)
        min_dR_i_jk_2fill = awk.fill_none(min_dR_i_jk_2fill, -99)
        max_dEta_i_jk_2fill = awk.fill_none(max_dEta_i_jk_2fill, -99)
        min_dEta_i_jk_2fill = awk.fill_none(min_dEta_i_jk_2fill, -99)
        max_dPhi_i_jk_2fill = awk.fill_none(max_dPhi_i_jk_2fill, -99)
        min_dPhi_i_jk_2fill = awk.fill_none(min_dPhi_i_jk_2fill, -99)
        output["max_dR"].fill(dataset=dataset_name, selection=selection, max_dR=max_dR_2fill)
        output["max_dEta"].fill(dataset=dataset_name, selection=selection, max_dEta=max_dEta_2fill)
        output["min_dR"].fill(dataset=dataset_name, selection=selection, min_dR=min_dR_2fill)
        output["min_dEta"].fill(dataset=dataset_name, selection=selection, min_dEta=min_dEta_2fill)
        output["min_pt"].fill(dataset=dataset_name, selection=selection, min_pt=min_pt_2fill)
        output["max_eta"].fill(dataset=dataset_name, selection=selection, max_eta=max_eta_2fill)
        output["max_ptoverM"].fill(dataset=dataset_name, selection=selection, max_ptoverM=max_pt_overM_2fill)
        output["min_ptoverM"].fill(dataset=dataset_name, selection=selection, min_ptoverM=min_pt_overM_2fill)
        output["max_dR_j_jj"].fill(dataset=dataset_name, selection=selection, max_dR_j_jj=max_dR_i_jk_2fill)
        output["max_dEta_j_jj"].fill(dataset=dataset_name, selection=selection, max_dEta_j_jj=max_dEta_i_jk_2fill)
        output["max_dPhi_j_jj"].fill(dataset=dataset_name, selection=selection, max_dPhi_j_jj=max_dPhi_i_jk_2fill)
        output["max_dPtoverM_j_jj"].fill(
            dataset=dataset_name, selection=selection, max_dPtoverM_j_jj=max_dPtoverM_i_jk_2fill)
        output["min_dR_j_jj"].fill(dataset=dataset_name, selection=selection, min_dR_j_jj=min_dR_i_jk_2fill)
        output["min_dEta_j_jj"].fill(dataset=dataset_name, selection=selection, min_dEta_j_jj=min_dEta_i_jk_2fill)
        output["min_dPhi_j_jj"].fill(dataset=dataset_name, selection=selection, min_dPhi_j_jj=min_dPhi_i_jk_2fill)
        output["min_dPtoverM_j_jj"].fill(
            dataset=dataset_name, selection=selection, min_dPtoverM_j_jj=min_dPtoverM_i_jk_2fill)
    return output
def process(self, events):
    """Select trijet events and accumulate flat per-event columns.

    Same selection and kinematics as the histogram-filling processor, but the
    results are appended to ``processor.column_accumulator`` arrays (one per
    variable per dataset) instead of histograms, so downstream code can use
    the raw values (e.g. for ntuples/BDT inputs).

    Parameters
    ----------
    events : NanoEvents-like awkward array with ``Jet`` and ``HLT``
        collections and ``metadata['dataset']``.

    Returns
    -------
    The filled accumulator (column accumulators + event counters).
    """
    output = self._accumulator.identity()
    dataset_name = events.metadata['dataset']
    output["total_events"][dataset_name] += events.__len__()

    # Initialize dict accumulators, if have not been initialized
    # (column_accumulator entries are created lazily per dataset).
    for jet in [0, 1, 2]:
        if dataset_name not in output[f"eta_{jet}_final"].keys():
            output[f"eta_{jet}_final"][dataset_name] = processor.column_accumulator(np.array([]))
        if dataset_name not in output[f"ptoverM_{jet}_final"].keys():
            output[f"ptoverM_{jet}_final"][dataset_name] = processor.column_accumulator(np.array([]))
    for pair in [(0, 1), (1, 2), (2, 0)]:
        if dataset_name not in output[f"dEta_{pair[0]}{pair[1]}_final"].keys():
            output[f"dEta_{pair[0]}{pair[1]}_final"][dataset_name] = processor.column_accumulator(np.array([]))
        if dataset_name not in output[f"dR_{pair[0]}{pair[1]}_final"].keys():
            output[f"dR_{pair[0]}{pair[1]}_final"][dataset_name] = processor.column_accumulator(np.array([]))
        if dataset_name not in output[f"moverM_{pair[0]}{pair[1]}_final"].keys():
            output[f"moverM_{pair[0]}{pair[1]}_final"][dataset_name] = processor.column_accumulator(np.array([]))
    for pair in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]:
        if dataset_name not in output[f"dR_{pair[0]}_{pair[1]}{pair[2]}_final"].keys():
            output[f"dR_{pair[0]}_{pair[1]}{pair[2]}_final"][dataset_name] = processor.column_accumulator(np.array([]))
        if dataset_name not in output[f"dEta_{pair[0]}_{pair[1]}{pair[2]}_final"].keys():
            output[f"dEta_{pair[0]}_{pair[1]}{pair[2]}_final"][dataset_name] = processor.column_accumulator(np.array([]))
        if dataset_name not in output[f"Phi_{pair[0]}_{pair[1]}{pair[2]}_final"].keys():
            output[f"Phi_{pair[0]}_{pair[1]}{pair[2]}_final"][dataset_name] = processor.column_accumulator(np.array([]))
        if dataset_name not in output[f"dPtoverM_{pair[0]}_{pair[1]}{pair[2]}_final"].keys():
            output[f"dPtoverM_{pair[0]}_{pair[1]}{pair[2]}_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"ptoverM_max_final"].keys():
        output[f"ptoverM_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"ptoverM_min_final"].keys():
        output[f"ptoverM_min_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"eta_max_final"].keys():
        output[f"eta_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dR_max_final"].keys():
        output[f"dR_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dR_min_final"].keys():
        output[f"dR_min_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dEta_max_final"].keys():
        output[f"dEta_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dEta_min_final"].keys():
        output[f"dEta_min_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dR_j_jj_max_final"].keys():
        output[f"dR_j_jj_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dR_j_jj_min_final"].keys():
        output[f"dR_j_jj_min_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dEta_j_jj_max_final"].keys():
        output[f"dEta_j_jj_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dEta_j_jj_min_final"].keys():
        output[f"dEta_j_jj_min_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dPhi_j_jj_max_final"].keys():
        output[f"dPhi_j_jj_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dPhi_j_jj_min_final"].keys():
        output[f"dPhi_j_jj_min_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dPtoverM_j_jj_max_final"].keys():
        output[f"dPtoverM_j_jj_max_final"][dataset_name] = processor.column_accumulator(np.array([]))
    if dataset_name not in output[f"dPtoverM_j_jj_min_final"].keys():
        output[f"dPtoverM_j_jj_min_final"][dataset_name] = processor.column_accumulator(np.array([]))

    # HLT selection
    # NOTE: ``year`` is read from module/global scope, not from ``self``.
    HLT_mask = []
    if year == "2016":
        if "SingleMuon" in dataset_name:
            #this does not work, as the name of file which is under processing is unknown
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50 | events.HLT.TkMu50
        else:
            #https://twiki.cern.ch/twiki/bin/view/CMS/HLTPathsRunIIList
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            elif "2016H" in dataset_name:
                HLT_mask = events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            else:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
    if year == "2017":
        if "SingleMuon" in dataset_name:
            if "2017B" in dataset_name:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
    if year == "2018":
        if "SingleMuon" in dataset_name:
            HLT_mask = events.HLT.IsoMu24 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500

    # Require 3 jets
    jet_mask = (events.Jet.pt > 30.) & (abs(events.Jet.eta) < 2.5) & (events.Jet.isTight)
    event_mask = (awk.sum(jet_mask, axis=1) >= 3)
    event_mask = event_mask & HLT_mask
    events_3j = events[event_mask]
    # Reduce jet mask to only events with 3 good jets
    jet_mask = jet_mask[event_mask]
    # Array of the jets to consider for trijet resonance
    selected_jets = events_3j.Jet[jet_mask][:, :3]

    # Pairs of jets
    #pairs = awk.argcombinations(selected_jets, 2)
    #jet_i, jet_j = awk.unzip(pairs)
    pairs = [(0, 1), (1, 2), (2, 0)]
    jet_i, jet_j = zip(*pairs)  # Returns [0, 1, 2] , [1, 2, 0]
    m_ij = (selected_jets[:, jet_i] + selected_jets[:, jet_j]).mass
    dR_ij = selected_jets[:, jet_i].delta_r(selected_jets[:, jet_j])
    dEta_ij = abs(selected_jets[:, jet_i].eta - selected_jets[:, jet_j].eta)
    # Jet-vs-dijet quantities: jet i against the (j+k) system.
    jet_k = [2, 0, 1]
    dR_i_jk = selected_jets[:, jet_i].delta_r(selected_jets[:, jet_j] + selected_jets[:, jet_k])
    dEta_i_jk = abs(selected_jets[:, jet_i].eta - (selected_jets[:, jet_j] + selected_jets[:, jet_k]).eta)
    # NOTE(review): plain |phi_i - phi_jk| is not wrapped into [0, pi] — confirm intended.
    dPhi_i_jk = abs(selected_jets[:, jet_i].phi - (selected_jets[:, jet_j] + selected_jets[:, jet_k]).phi)
    m3j = selected_jets.sum().mass
    pt_i_overM = selected_jets.pt / m3j
    m_01_overM = m_ij[:,0] / m3j
    m_12_overM = m_ij[:,1] / m3j
    m_20_overM = m_ij[:,2] / m3j
    dPtoverM_0_12 = abs(selected_jets[:, 0].pt - (selected_jets[:, 1] + selected_jets[:, 2]).pt) / m3j
    dPtoverM_1_20 = abs(selected_jets[:, 1].pt - (selected_jets[:, 2] + selected_jets[:, 0]).pt) / m3j
    dPtoverM_2_01 = abs(selected_jets[:, 2].pt - (selected_jets[:, 0] + selected_jets[:, 1]).pt) / m3j

    # Event selection masks
    # selection_masks = {}
    # Pre-selection
    selection = PackedSelection()
    selection.add("Dummy", m3j > 000)  # placeholder cut (m3j > 0): keeps all trijet events
    sel_mask = selection.require(**{name: True for name in selection.names})
    # selection_masks["Pre-selection"] = sel_mask
    output["selected_events"][dataset_name] += events_3j[sel_mask].__len__()

    # Append the selected per-event values to the column accumulators.
    for jet in [0, 1, 2]:
        output[f"eta_{jet}_final"][dataset_name] += processor.column_accumulator(np.array(selected_jets[:, jet][sel_mask].eta))
        output[f"ptoverM_{jet}_final"][dataset_name] += processor.column_accumulator(np.array(pt_i_overM[:, jet][sel_mask]))
    for pair in [(0, 1), (1, 2), (2, 0)]:
        # pair[0] is also the column index of this pair in dEta_ij/dR_ij.
        output[f"dEta_{pair[0]}{pair[1]}_final"][dataset_name] += processor.column_accumulator(np.array(dEta_ij[:, pair[0]][sel_mask]))
        output[f"dR_{pair[0]}{pair[1]}_final"][dataset_name] += processor.column_accumulator(np.array(dR_ij[:, pair[0]][sel_mask]))
    output[f"moverM_01_final"][dataset_name] += processor.column_accumulator(np.array(m_01_overM[sel_mask]))
    output[f"moverM_12_final"][dataset_name] += processor.column_accumulator(np.array(m_12_overM[sel_mask]))
    output[f"moverM_20_final"][dataset_name] += processor.column_accumulator(np.array(m_20_overM[sel_mask]))
    for pair in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]:
        output[f"dR_{pair[0]}_{pair[1]}{pair[2]}_final"][dataset_name] += processor.column_accumulator(np.array(dR_i_jk[:, pair[0]][sel_mask]))
        output[f"dEta_{pair[0]}_{pair[1]}{pair[2]}_final"][dataset_name] += processor.column_accumulator(np.array(dEta_i_jk[:, pair[0]][sel_mask]))
        output[f"Phi_{pair[0]}_{pair[1]}{pair[2]}_final"][dataset_name] += processor.column_accumulator(np.array(dPhi_i_jk[:, pair[0]][sel_mask]))
    output[f"dPtoverM_0_12_final"][dataset_name] += processor.column_accumulator(np.array(dPtoverM_0_12[sel_mask]))
    output[f"dPtoverM_1_20_final"][dataset_name] += processor.column_accumulator(np.array(dPtoverM_1_20[sel_mask]))
    output[f"dPtoverM_2_01_final"][dataset_name] += processor.column_accumulator(np.array(dPtoverM_2_01[sel_mask]))

    # Per-event extrema over the three jets / three pairings.
    max_pt_overM_2fill = awk.max(pt_i_overM[sel_mask], axis=1)
    min_pt_overM_2fill = awk.min(pt_i_overM[sel_mask], axis=1)
    max_dR_2fill = awk.max(dR_ij[sel_mask], axis=1)
    max_dEta_2fill = awk.max(dEta_ij[sel_mask], axis=1)
    min_dR_2fill = awk.min(dR_ij[sel_mask], axis=1)
    min_dEta_2fill = awk.min(dEta_ij[sel_mask], axis=1)
    min_pt_2fill = awk.min(selected_jets[sel_mask].pt, axis=1)
    max_eta_2fill = awk.max(abs(selected_jets[sel_mask].eta), axis=1)
    max_dR_i_jk_2fill = awk.max(dR_i_jk[sel_mask], axis=1)
    min_dR_i_jk_2fill = awk.min(dR_i_jk[sel_mask], axis=1)
    max_dEta_i_jk_2fill = awk.max(dEta_i_jk[sel_mask], axis=1)
    min_dEta_i_jk_2fill = awk.min(dEta_i_jk[sel_mask], axis=1)
    max_dPhi_i_jk_2fill = awk.max(dPhi_i_jk[sel_mask], axis=1)
    min_dPhi_i_jk_2fill = awk.min(dPhi_i_jk[sel_mask], axis=1)
    # dPtoverM extrema via a plain Python loop over the three flat arrays.
    max_dPtoverM_i_jk_2fill = []
    min_dPtoverM_i_jk_2fill = []
    dPtoverM_0_12_2fill = dPtoverM_0_12[sel_mask]
    dPtoverM_1_20_2fill = dPtoverM_1_20[sel_mask]
    dPtoverM_2_01_2fill = dPtoverM_2_01[sel_mask]
    for pair in zip(dPtoverM_0_12_2fill, dPtoverM_1_20_2fill, dPtoverM_2_01_2fill):
        max_dPtoverM_i_jk_2fill.append(max(pair))
        min_dPtoverM_i_jk_2fill.append(min(pair))
    # Replace missing values (events with no entries) by the sentinel -99.
    max_pt_overM_2fill = awk.fill_none(max_pt_overM_2fill, -99)
    min_pt_overM_2fill = awk.fill_none(min_pt_overM_2fill, -99)
    max_dR_2fill = awk.fill_none(max_dR_2fill, -99)
    max_dEta_2fill = awk.fill_none(max_dEta_2fill, -99)
    min_dR_2fill = awk.fill_none(min_dR_2fill, -99)
    min_dEta_2fill = awk.fill_none(min_dEta_2fill, -99)
    min_pt_2fill = awk.fill_none(min_pt_2fill, -99)
    max_eta_2fill = awk.fill_none(max_eta_2fill, -99)
    max_dR_i_jk_2fill = awk.fill_none(max_dR_i_jk_2fill, -99)
    min_dR_i_jk_2fill = awk.fill_none(min_dR_i_jk_2fill, -99)
    max_dEta_i_jk_2fill = awk.fill_none(max_dEta_i_jk_2fill, -99)
    min_dEta_i_jk_2fill = awk.fill_none(min_dEta_i_jk_2fill, -99)
    max_dPhi_i_jk_2fill = awk.fill_none(max_dPhi_i_jk_2fill, -99)
    min_dPhi_i_jk_2fill = awk.fill_none(min_dPhi_i_jk_2fill, -99)
    output[f"ptoverM_max_final"][dataset_name] += processor.column_accumulator(np.array(max_pt_overM_2fill))
    output[f"ptoverM_min_final"][dataset_name] += processor.column_accumulator(np.array(min_pt_overM_2fill))
    output[f"eta_max_final"][dataset_name] += processor.column_accumulator(np.array(max_eta_2fill))
    output[f"dR_max_final"][dataset_name] += processor.column_accumulator(np.array(max_dR_2fill))
    output[f"dR_min_final"][dataset_name] += processor.column_accumulator(np.array(min_dR_2fill))
    output[f"dEta_max_final"][dataset_name] += processor.column_accumulator(np.array(max_dEta_2fill))
    output[f"dEta_min_final"][dataset_name] += processor.column_accumulator(np.array(min_dEta_2fill))
    output[f"dR_j_jj_max_final"][dataset_name] += processor.column_accumulator(np.array(max_dR_i_jk_2fill))
    output[f"dR_j_jj_min_final"][dataset_name] += processor.column_accumulator(np.array(min_dR_i_jk_2fill))
    output[f"dEta_j_jj_max_final"][dataset_name] += processor.column_accumulator(np.array(max_dEta_i_jk_2fill))
    output[f"dEta_j_jj_min_final"][dataset_name] += processor.column_accumulator(np.array(min_dEta_i_jk_2fill))
    output[f"dPhi_j_jj_max_final"][dataset_name] += processor.column_accumulator(np.array(max_dPhi_i_jk_2fill))
    output[f"dPhi_j_jj_min_final"][dataset_name] += processor.column_accumulator(np.array(min_dPhi_i_jk_2fill))
    output[f"dPtoverM_j_jj_max_final"][dataset_name] += processor.column_accumulator(np.array(max_dPtoverM_i_jk_2fill))
    output[f"dPtoverM_j_jj_min_final"][dataset_name] += processor.column_accumulator(np.array(min_dPtoverM_i_jk_2fill))
    return output
def process(self, events):
    """Select trijet events and fill histograms per named selection.

    Builds two event-selection categories ("pre-selection" and
    "JetHLT - presel", the latter additionally requiring the jet HLT paths)
    via coffea ``PackedSelection`` objects, then fills one histogram per
    kinematic variable for each category.

    Parameters
    ----------
    events : NanoEvents-like awkward array with ``Jet`` and ``HLT``
        collections and ``metadata['dataset']``.

    Returns
    -------
    The filled accumulator (histograms + event counters).
    """
    output = self._accumulator.identity()
    dataset_name = events.metadata['dataset']
    output["total_events"][dataset_name] += events.__len__()

    # HLT selection
    # NOTE: ``year`` is read from module/global scope, not from ``self``.
    HLT_mask = []
    if year == "2016":
        if "SingleMuon" in dataset_name:
            #this does not work, as the name of file which is under processing is unknown
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu24 | events.HLT.IsoTkMu24 | events.HLT.Mu50 | events.HLT.TkMu50
        else:
            #https://twiki.cern.ch/twiki/bin/view/CMS/HLTPathsRunIIList
            if "2016B2" in dataset_name:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            elif "2016H" in dataset_name:
                HLT_mask = events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
            else:
                HLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
    if year == "2017":
        if "SingleMuon" in dataset_name:
            if "2017B" in dataset_name:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50
            else:
                HLT_mask = events.HLT.IsoMu27 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
    if year == "2018":
        if "SingleMuon" in dataset_name:
            HLT_mask = events.HLT.IsoMu24 | events.HLT.Mu50 | events.HLT.OldMu100 | events.HLT.TkMu100
        else:
            HLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500

    # Require 3 jets
    jet_mask = (events.Jet.pt > 30.) & (abs(events.Jet.eta) < 2.5) & (events.Jet.isTight)
    event_mask = (awk.sum(jet_mask, axis=1) >= 3)
    event_mask = event_mask & HLT_mask
    events_3j = events[event_mask]
    # Reduce jet mask to only events with 3 good jets
    jet_mask = jet_mask[event_mask]
    # Array of the jets to consider for trijet resonance
    selected_jets = events_3j.Jet[jet_mask][:, :3]

    # Pairs of jets
    #pairs = awk.argcombinations(selected_jets, 2)
    #jet_i, jet_j = awk.unzip(pairs)
    pairs = [(0, 1), (1, 2), (2, 0)]
    jet_i, jet_j = zip(*pairs)  # Returns [0, 1, 2] , [1, 2, 0]
    m_ij = (selected_jets[:, jet_i] + selected_jets[:, jet_j]).mass
    dR_ij = selected_jets[:, jet_i].delta_r(selected_jets[:, jet_j])
    dEta_ij = abs(selected_jets[:, jet_i].eta - selected_jets[:, jet_j].eta)
    # Per-event extrema used by the cut definitions below.
    max_dR = awk.max(dR_ij, axis=1)
    max_dEta = awk.max(dEta_ij, axis=1)
    min_dR = awk.min(dR_ij, axis=1)
    min_dEta = awk.min(dEta_ij, axis=1)
    min_pT = awk.min(selected_jets.pt, axis=1)
    #m01 = (selected_jets[:, 0] + selected_jets[:, 1]).mass
    #m12 = (selected_jets[:, 1] + selected_jets[:, 2]).mass
    #m20 = (selected_jets[:, 2] + selected_jets[:, 0]).mass
    #dR01 = (selected_jets[:, 0].delta_r(selected_jets[:, 1]))
    #dR12 = (selected_jets[:, 1].delta_r(selected_jets[:, 2]))
    #dR20 = (selected_jets[:, 2].delta_r(selected_jets[:, 0]))
    #dEta01 = abs(selected_jets[:, 0].eta - selected_jets[:, 1].eta)
    #dEta12 = abs(selected_jets[:, 1].eta - selected_jets[:, 2].eta)
    #dEta20 = abs(selected_jets[:, 2].eta - selected_jets[:, 0].eta)
    m3j = selected_jets.sum().mass  #(selected_jets[:, 0] + selected_jets[:, 1] + selected_jets[:, 2]).mass
    pt_i_overM = selected_jets.pt / m3j
    m_01_overM = m_ij[:, 0] / m3j
    m_12_overM = m_ij[:, 1] / m3j
    m_20_overM = m_ij[:, 2] / m3j

    # Event selection - pre-selection
    selections = {}
    selection_items = {}  # cut names to require, keyed like ``selections``
    selections["pre-selection"] = PackedSelection()
    selection_items["pre-selection"] = []
    selections["pre-selection"].add("MaxDEta", max_dEta < 1.3)
    selection_items["pre-selection"].append("MaxDEta")
    selections["pre-selection"].add("MinDR", min_dR > 0.4)
    selection_items["pre-selection"].append("MinDR")
    selections["pre-selection"].add("MinJetPt", min_pT > 50.)
    selection_items["pre-selection"].append("MinJetPt")

    # Event selection - pre-selection & HLT_trigger
    selections["JetHLT - presel"] = PackedSelection()
    selection_items["JetHLT - presel"] = []
    if year == "2016":
        JetHLT_mask = []
        if "2016B2" in dataset_name:
            JetHLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
        elif "2016H" in dataset_name:
            JetHLT_mask = events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
        else:
            JetHLT_mask = events.HLT.PFHT800 | events.HLT.PFHT900 | events.HLT.AK8PFJet450 | events.HLT.AK8PFJet500 | events.HLT.PFJet500 | events.HLT.CaloJet500_NoJetID
        # index by event_mask so the cut aligns with the events_3j subset
        selections["JetHLT - presel"].add("JetHLT_fired", JetHLT_mask[event_mask])
        selection_items["JetHLT - presel"].append("JetHLT_fired")
    if year == "2017":
        JetHLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
        selections["JetHLT - presel"].add("JetHLT_fired", JetHLT_mask[event_mask])
        selection_items["JetHLT - presel"].append("JetHLT_fired")
    if year == "2018":
        JetHLT_mask = events.HLT.PFHT1050 | events.HLT.AK8PFJet500 | events.HLT.AK8PFJet550 | events.HLT.CaloJet500_NoJetID | events.HLT.CaloJet550_NoJetID | events.HLT.PFJet500
        selections["JetHLT - presel"].add("JetHLT_fired", JetHLT_mask[event_mask])
        selection_items["JetHLT - presel"].append("JetHLT_fired")
    selections["JetHLT - presel"].add("MaxDEta", max_dEta < 1.3)
    selection_items["JetHLT - presel"].append("MaxDEta")
    selections["JetHLT - presel"].add("MinDR", min_dR > 0.4)
    selection_items["JetHLT - presel"].append("MinDR")
    selections["JetHLT - presel"].add("MinJetPt", min_pT > 50.)
    selection_items["JetHLT - presel"].append("MinJetPt")

    # Fill histograms
    # NOTE: selection.require(...) is re-evaluated inline for every fill.
    for selection_name, selection in selections.items():
        output["mjjj"].fill(
            dataset=dataset_name, selection=selection_name, mjjj=m3j[selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["m01"].fill(
            dataset=dataset_name, selection=selection_name, m01=m_ij[:, 0][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["m12"].fill(
            dataset=dataset_name, selection=selection_name, m12=m_ij[:, 1][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["m20"].fill(
            dataset=dataset_name, selection=selection_name, m20=m_ij[:, 2][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["dR01"].fill(
            dataset=dataset_name, selection=selection_name, dR01=dR_ij[:, 0][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["dR12"].fill(
            dataset=dataset_name, selection=selection_name, dR12=dR_ij[:, 1][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["dR20"].fill(
            dataset=dataset_name, selection=selection_name, dR20=dR_ij[:, 2][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["dEta01"].fill(
            dataset=dataset_name, selection=selection_name, dEta01=dEta_ij[:, 0][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["dEta12"].fill(
            dataset=dataset_name, selection=selection_name, dEta12=dEta_ij[:, 1][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["dEta20"].fill(
            dataset=dataset_name, selection=selection_name, dEta20=dEta_ij[:, 2][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["m01overM"].fill(
            dataset=dataset_name, selection=selection_name, m01overM=m_01_overM[selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["m12overM"].fill(
            dataset=dataset_name, selection=selection_name, m12overM=m_12_overM[selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["m20overM"].fill(
            dataset=dataset_name, selection=selection_name, m20overM=m_20_overM[selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["pt0"].fill(
            dataset=dataset_name, selection=selection_name, pt0=selected_jets[:, 0][selection.require(
                **{name: True for name in selection_items[selection_name]})].pt)
        output["pt1"].fill(
            dataset=dataset_name, selection=selection_name, pt1=selected_jets[:, 1][selection.require(
                **{name: True for name in selection_items[selection_name]})].pt)
        output["pt2"].fill(
            dataset=dataset_name, selection=selection_name, pt2=selected_jets[:, 2][selection.require(
                **{name: True for name in selection_items[selection_name]})].pt)
        output["eta0"].fill(
            dataset=dataset_name, selection=selection_name, eta0=selected_jets[:, 0][selection.require(
                **{name: True for name in selection_items[selection_name]})].eta)
        output["eta1"].fill(
            dataset=dataset_name, selection=selection_name, eta1=selected_jets[:, 1][selection.require(
                **{name: True for name in selection_items[selection_name]})].eta)
        output["eta2"].fill(
            dataset=dataset_name, selection=selection_name, eta2=selected_jets[:, 2][selection.require(
                **{name: True for name in selection_items[selection_name]})].eta)
        output["ptoverM0"].fill(
            dataset=dataset_name, selection=selection_name, ptoverM0=pt_i_overM[:, 0][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["ptoverM1"].fill(
            dataset=dataset_name, selection=selection_name, ptoverM1=pt_i_overM[:, 1][selection.require(
                **{name: True for name in selection_items[selection_name]})])
        output["ptoverM2"].fill(
            dataset=dataset_name, selection=selection_name, ptoverM2=pt_i_overM[:, 2][selection.require(
                **{name: True for name in selection_items[selection_name]})])
    return output
def process(self, events):
    """Main event loop for the same-sign dilepton analysis.

    Builds fakeable/tight lepton collections, jets and event-level
    variables, defines the SS baseline plus nonprompt/charge-flip
    control selections, optionally evaluates/dumps the DNN inputs,
    and fills all output histograms.

    Parameters
    ----------
    events : NanoEvents chunk; ``metadata['dataset']`` identifies the
        sample, data is detected by matching the dataset name against
        the dilepton primary-dataset regex.

    Returns
    -------
    The processor accumulator with histograms and (optionally) dumped
    column accumulators filled.
    """
    output = self.accumulator.identity()

    # use a very loose preselection to filter the events
    presel = ak.num(events.Jet)>2

    ev = events[presel]
    dataset = ev.metadata['dataset']

    # load the config - probably not needed anymore
    cfg = loadConfig()

    output['totalEvents']['all'] += len(events)
    output['skimmedEvents']['all'] += len(ev)

    # MC only (dataset name does not match a data primary dataset)
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        ## Generated leptons
        gen_lep = ev.GenL
        leading_gen_lep = gen_lep[ak.singletons(ak.argmax(gen_lep.pt, axis=1))]
        trailing_gen_lep = gen_lep[ak.singletons(ak.argmin(gen_lep.pt, axis=1))]

    # Get the leptons. This has changed a couple of times now, but we are using fakeable objects as baseline leptons.
    # The added p4 instance has the corrected pt (conePt for fakeable) and should be used for any following selection or calculation
    # Any additional correction (if we choose to do so) should be added here, e.g. Rochester corrections, ...
    ## Muons
    mu_v = Collections(ev, "Muon", "vetoTTH", year=year).get()  # these include all muons, tight and fakeable
    mu_t = Collections(ev, "Muon", "tightSSTTH", year=year).get()
    mu_f = Collections(ev, "Muon", "fakeableSSTTH", year=year).get()
    muon = ak.concatenate([mu_t, mu_f], axis=1)
    muon['p4'] = get_four_vec_fromPtEtaPhiM(muon, get_pt(muon), muon.eta, muon.phi, muon.mass, copy=False) #FIXME new

    ## Electrons
    el_v = Collections(ev, "Electron", "vetoTTH", year=year).get()
    el_t = Collections(ev, "Electron", "tightSSTTH", year=year).get()
    el_f = Collections(ev, "Electron", "fakeableSSTTH", year=year).get()
    electron = ak.concatenate([el_t, el_f], axis=1)
    electron['p4'] = get_four_vec_fromPtEtaPhiM(electron, get_pt(electron), electron.eta, electron.phi, electron.mass, copy=False) #FIXME new

    # MC only: split tight leptons into prompt / nonprompt / charge-flip categories
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        el_t_p = prompt(el_t)
        el_t_np = nonprompt(el_t)
        el_f_p = prompt(el_f)
        el_f_np = nonprompt(el_f)
        mu_t_p = prompt(mu_t)
        mu_t_np = nonprompt(mu_t)
        mu_f_p = prompt(mu_f)
        mu_f_np = nonprompt(mu_f)

        # charge flip: gen-matched electron with opposite-sign pdgId
        is_flipped = ( (el_t_p.matched_gen.pdgId*(-1) == el_t_p.pdgId) & (abs(el_t_p.pdgId) == 11) )
        el_t_p_cc = el_t_p[~is_flipped]  # this is tight, prompt, and charge consistent
        el_t_p_cf = el_t_p[is_flipped]  # this is tight, prompt, and charge flipped

    ## Merge electrons and muons. These are fakeable leptons now
    lepton = ak.concatenate([muon, electron], axis=1)
    leading_lepton_idx = ak.singletons(ak.argmax(lepton.p4.pt, axis=1))
    leading_lepton = lepton[leading_lepton_idx]
    trailing_lepton_idx = ak.singletons(ak.argmin(lepton.p4.pt, axis=1))
    trailing_lepton = lepton[trailing_lepton_idx]

    dilepton_mass = (leading_lepton.p4 + trailing_lepton.p4).mass
    dilepton_pt = (leading_lepton.p4 + trailing_lepton.p4).pt
    #dilepton_dR = delta_r(leading_lepton, trailing_lepton)
    dilepton_dR = leading_lepton.p4.delta_r(trailing_lepton.p4)

    # pdgIds of the two hardest leptons, zero-padded for events with < 2 leptons
    lepton_pdgId_pt_ordered = ak.fill_none(ak.pad_none(lepton[ak.argsort(lepton.p4.pt, ascending=False)].pdgId, 2, clip=True), 0)

    # MC only: gen-level lepton counting for the LL classifier
    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        n_nonprompt = getNonPromptFromFlavour(electron) + getNonPromptFromFlavour(muon)
        n_chargeflip = getChargeFlips(electron, ev.GenPart) + getChargeFlips(muon, ev.GenPart)
        gp = ev.GenPart
        # status==1 electrons/muons with isPrompt and fromHardProcess status flags
        gp_e = gp[((abs(gp.pdgId)==11)&(gp.status==1)&((gp.statusFlags&(1<<0))==1)&(gp.statusFlags&(1<<8)==256))]
        gp_m = gp[((abs(gp.pdgId)==13)&(gp.status==1)&((gp.statusFlags&(1<<0))==1)&(gp.statusFlags&(1<<8)==256))]
        n_gen_lep = ak.num(gp_e) + ak.num(gp_m)
    else:
        n_gen_lep = np.zeros(len(ev))

    LL = (n_gen_lep > 2)  # this is the classifier for LL events (should mainly be ttZ/tZ/WZ...)

    # transverse mass of each lepton with MET, and its per-event minimum
    mt_lep_met = mt(lepton.p4.pt, lepton.p4.phi, ev.MET.pt, ev.MET.phi)
    min_mt_lep_met = ak.min(mt_lep_met, axis=1)

    ## Tau and other stuff
    tau = getTaus(ev)
    tau = tau[~match(tau, muon, deltaRCut=0.4)]
    tau = tau[~match(tau, electron, deltaRCut=0.4)]

    track = getIsoTracks(ev)

    ## Jets
    jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')
    jet = jet[ak.argsort(jet.pt_nom, ascending=False)]  # need to sort wrt smeared and recorrected jet pt
    jet = jet[~match(jet, muon, deltaRCut=0.4)]  # remove jets that overlap with muons
    jet = jet[~match(jet, electron, deltaRCut=0.4)]  # remove jets that overlap with electrons

    central = jet[(abs(jet.eta)<2.4)]
    btag = getBTagsDeepFlavB(jet, year=self.year)  # should study working point for DeepJet
    light = getBTagsDeepFlavB(jet, year=self.year, invert=True)
    fwd = getFwdJet(light)
    fwd_noPU = getFwdJet(light, puId=False)

    # NOTE(review): ak.argsort defaults to ascending=True, so this takes the two
    # LOWEST DeepFlavB scores despite the name — confirm whether ascending=False
    # was intended.
    high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:,:2]

    bl = cross(lepton, high_score_btag)
    bl_dR = delta_r(bl['0'], bl['1'])
    min_bl_dR = ak.min(bl_dR, axis=1)

    ## forward jets
    j_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))]  # highest momentum spectator

    # try to get either the most forward light jet, or if there's more than one with eta>1.7, the highest pt one
    # NOTE(review): ascending sort of |eta| puts the most CENTRAL jet first —
    # confirm against the commented argmax alternative below.
    most_fwd = light[ak.argsort(abs(light.eta))][:,0:1]
    #most_fwd = light[ak.singletons(ak.argmax(abs(light.eta)))]
    best_fwd = ak.concatenate([j_fwd, most_fwd], axis=1)[:,0:1]

    jf = cross(j_fwd, jet)
    mjf = (jf['0']+jf['1']).mass
    j_fwd2 = jf[ak.singletons(ak.argmax(mjf, axis=1))]['1']  # this is the jet that forms the largest invariant mass with j_fwd
    delta_eta = abs(j_fwd2.eta - j_fwd.eta)

    ## MET -> can switch to puppi MET
    met_pt = ev.MET.pt
    met_phi = ev.MET.phi

    ## other variables
    ht = ak.sum(jet.pt, axis=1)
    #st = met_pt + ht + ak.sum(get_pt(muon), axis=1) + ak.sum(get_pt(electron), axis=1)
    st = met_pt + ht + ak.sum(lepton.p4.pt, axis=1)

    # define the weight
    weight = Weights( len(ev) )

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # lumi weight
        weight.add("weight", ev.weight*cfg['lumi'][self.year])
        # PU weight
        weight.add("PU", ev.puWeight, weightUp=ev.puWeightUp, weightDown=ev.puWeightDown, shift=False)
        # b-tag SFs
        weight.add("btag", self.btagSF.Method1a(btag, light))
        # lepton SFs
        weight.add("lepton", self.leptonSF.get(electron, muon))

    cutflow = Cutflow(output, ev, weight=weight)

    # slightly restructured
    # calculate everything from loose, require two tights on top
    # since n_tight == n_loose == 2, the tight and loose leptons are the same in the end
    # in this selection we'll get events with exactly two fakeable+tight and two loose leptons.
    sel = Selection(
        dataset = dataset,
        events = ev,
        year = self.year,
        ele = electron,
        ele_veto = el_v,
        mu = muon,
        mu_veto = mu_v,
        jet_all = jet,
        jet_central = central,
        jet_btag = btag,
        jet_fwd = fwd,
        jet_light = light,
        met = ev.MET,
    )

    baseline = sel.dilep_baseline(cutflow=cutflow, SS=True, omit=['N_fwd>0'])
    baseline_OS = sel.dilep_baseline(cutflow=cutflow, SS=False, omit=['N_fwd>0'])  # this is for charge flip estimation

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        # MC: define estimation/observation regions for nonprompt and charge flip
        BL = (baseline & ((ak.num(el_t_p_cc)+ak.num(mu_t_p))==2))  # this is the MC baseline for events with two tight prompt leptons
        BL_incl = (baseline & ((ak.num(el_t)+ak.num(mu_t))==2))  # this is the MC baseline for events with two tight leptons

        np_est_sel_mc = (baseline & \
            ((((ak.num(el_t_p_cc)+ak.num(mu_t_p))==1) & ((ak.num(el_f_np)+ak.num(mu_f_np))==1)) | (((ak.num(el_t_p_cc)+ak.num(mu_t_p))==0) & ((ak.num(el_f_np)+ak.num(mu_f_np))==2)) ))  # no overlap between tight and nonprompt, and veto on additional leptons. this should be enough
        np_obs_sel_mc = (baseline & ((ak.num(el_t)+ak.num(mu_t))==2) & ((ak.num(el_t_np)+ak.num(mu_t_np))>=1) )  # two tight leptons, at least one nonprompt
        np_est_sel_data = (baseline & ~baseline)  # this has to be false

        cf_est_sel_mc = (baseline_OS & ((ak.num(el_t_p)+ak.num(mu_t_p))==2))
        cf_obs_sel_mc = (baseline & ((ak.num(el_t)+ak.num(mu_t))==2) & ((ak.num(el_t_p_cf))>=1) )  # two tight leptons, at least one electron charge flip
        cf_est_sel_data = (baseline & ~baseline)  # this has to be false

        weight_np_mc = self.nonpromptWeight.get(el_f_np, mu_f_np, meas='TT')
        weight_cf_mc = self.chargeflipWeight.flip_weight(el_t_p)

    else:
        # data: MC-truth based regions are defined as always-false masks
        BL = (baseline & ((ak.num(el_t)+ak.num(mu_t))==2))
        BL_incl = BL
        np_est_sel_mc = (baseline & ~baseline)
        np_obs_sel_mc = (baseline & ~baseline)
        np_est_sel_data = (baseline & (ak.num(el_t)+ak.num(mu_t)==1) & (ak.num(el_f)+ak.num(mu_f)==1) )
        cf_est_sel_mc = (baseline & ~baseline)
        cf_obs_sel_mc = (baseline & ~baseline)
        cf_est_sel_data = (baseline_OS & ((ak.num(el_t)+ak.num(mu_t))==2) )

        weight_np_mc = np.zeros(len(ev))
        weight_cf_mc = np.zeros(len(ev))

    #rle = ak.to_numpy(ak.zip([ev.run, ev.luminosityBlock, ev.event]))
    run_ = ak.to_numpy(ev.run)
    lumi_ = ak.to_numpy(ev.luminosityBlock)
    event_ = ak.to_numpy(ev.event)

    if False:
        output['%s_run'%dataset] += processor.column_accumulator(run_[BL])
        output['%s_lumi'%dataset] += processor.column_accumulator(lumi_[BL])
        output['%s_event'%dataset] += processor.column_accumulator(event_[BL])

    weight_BL = weight.weight()[BL]  # this is just a shortened weight list for the two prompt selection
    weight_np_data = self.nonpromptWeight.get(el_f, mu_f, meas='data')
    weight_cf_data = self.chargeflipWeight.flip_weight(el_t)

    out_sel = (BL | np_est_sel_mc | cf_est_sel_mc)

    dummy = (np.ones(len(ev))==1)
    def fill_multiple_np(hist, arrays, add_sel=dummy):
        # Fill `hist` once per region (prompt, inclusive, NP/CF estimates and
        # observations), each with its region mask AND-ed with `add_sel` and
        # the matching per-region weight.
        #reg_sel = [BL, np_est_sel_mc, np_obs_sel_mc, np_est_sel_data, cf_est_sel_mc, cf_obs_sel_mc, cf_est_sel_data],
        #print ('len', len(reg_sel[0]))
        #print ('sel', reg_sel[0])
        # NOTE(review): the trailing comma makes reg_sel a 1-tuple holding the
        # list — hence all the reg_sel[0] indexing below.
        reg_sel = [BL&add_sel, BL_incl&add_sel, np_est_sel_mc&add_sel, np_obs_sel_mc&add_sel, np_est_sel_data&add_sel, cf_est_sel_mc&add_sel, cf_obs_sel_mc&add_sel, cf_est_sel_data&add_sel],
        fill_multiple(
            hist,
            datasets=[
                dataset,  # only prompt contribution from process
                dataset+"_incl",  # everything from process (inclusive MC truth)
                "np_est_mc",  # MC based NP estimate
                "np_obs_mc",  # MC based NP observation
                "np_est_data",
                "cf_est_mc",
                "cf_obs_mc",
                "cf_est_data",
            ],
            arrays=arrays,
            selections=reg_sel[0],  # no idea where the additional dimension is coming from...
            weights=[
                weight.weight()[reg_sel[0][0]],
                weight.weight()[reg_sel[0][1]],
                weight.weight()[reg_sel[0][2]]*weight_np_mc[reg_sel[0][2]],
                weight.weight()[reg_sel[0][3]],
                weight.weight()[reg_sel[0][4]]*weight_np_data[reg_sel[0][4]],
                weight.weight()[reg_sel[0][5]]*weight_cf_mc[reg_sel[0][5]],
                weight.weight()[reg_sel[0][6]],
                weight.weight()[reg_sel[0][7]]*weight_cf_data[reg_sel[0][7]],
            ],
        )

    if self.evaluate or self.dump:
        # define the inputs to the NN
        # this is super stupid. there must be a better way.
        # used a np.stack which is ok performance wise. pandas data frame seems to be slow and memory inefficient
        #FIXME no n_b, n_fwd back in v13/v14 of the DNN
        NN_inputs_d = {
            'n_jet':            ak.to_numpy(ak.num(jet)),
            'n_fwd':            ak.to_numpy(ak.num(fwd)),
            'n_b':              ak.to_numpy(ak.num(btag)),
            'n_tau':            ak.to_numpy(ak.num(tau)),
            #'n_track':          ak.to_numpy(ak.num(track)),
            'st':               ak.to_numpy(st),
            'met':              ak.to_numpy(ev.MET.pt),
            'mjj_max':          ak.to_numpy(ak.fill_none(ak.max(mjf, axis=1),0)),
            'delta_eta_jj':     ak.to_numpy(pad_and_flatten(delta_eta)),
            'lead_lep_pt':      ak.to_numpy(pad_and_flatten(leading_lepton.p4.pt)),
            'lead_lep_eta':     ak.to_numpy(pad_and_flatten(leading_lepton.p4.eta)),
            'sublead_lep_pt':   ak.to_numpy(pad_and_flatten(trailing_lepton.p4.pt)),
            'sublead_lep_eta':  ak.to_numpy(pad_and_flatten(trailing_lepton.p4.eta)),
            'dilepton_mass':    ak.to_numpy(pad_and_flatten(dilepton_mass)),
            'dilepton_pt':      ak.to_numpy(pad_and_flatten(dilepton_pt)),
            'fwd_jet_pt':       ak.to_numpy(pad_and_flatten(best_fwd.pt)),
            'fwd_jet_p':        ak.to_numpy(pad_and_flatten(best_fwd.p)),
            'fwd_jet_eta':      ak.to_numpy(pad_and_flatten(best_fwd.eta)),
            'lead_jet_pt':      ak.to_numpy(pad_and_flatten(jet[:, 0:1].pt)),
            'sublead_jet_pt':   ak.to_numpy(pad_and_flatten(jet[:, 1:2].pt)),
            'lead_jet_eta':     ak.to_numpy(pad_and_flatten(jet[:, 0:1].eta)),
            'sublead_jet_eta':  ak.to_numpy(pad_and_flatten(jet[:, 1:2].eta)),
            'lead_btag_pt':     ak.to_numpy(pad_and_flatten(high_score_btag[:, 0:1].pt)),
            'sublead_btag_pt':  ak.to_numpy(pad_and_flatten(high_score_btag[:, 1:2].pt)),
            'lead_btag_eta':    ak.to_numpy(pad_and_flatten(high_score_btag[:, 0:1].eta)),
            'sublead_btag_eta': ak.to_numpy(pad_and_flatten(high_score_btag[:, 1:2].eta)),
            'min_bl_dR':        ak.to_numpy(ak.fill_none(min_bl_dR, 0)),
            'min_mt_lep_met':   ak.to_numpy(ak.fill_none(min_mt_lep_met, 0)),
        }

        if self.dump:
            for k in NN_inputs_d.keys():
                output[k] += processor.column_accumulator(NN_inputs_d[k][out_sel])

        if self.evaluate:
            NN_inputs = np.stack( [NN_inputs_d[k] for k in NN_inputs_d.keys()] )

            # NOTE(review): the positional 0 here is np.nan_to_num's `copy`
            # argument, not `nan` (nan already defaults to 0.0) — presumably
            # intended; confirm.
            NN_inputs = np.nan_to_num(NN_inputs, 0, posinf=1e5, neginf=-1e5)  # events with posinf/neginf/nan will not pass the BL selection anyway

            NN_inputs = np.moveaxis(NN_inputs, 0, 1)  # this is needed for a np.stack (old version)

            model, scaler = load_onnx_model(self.training)

            try:
                NN_inputs_scaled = scaler.transform(NN_inputs)

                NN_pred = predict_onnx(model, NN_inputs_scaled)

                best_score = np.argmax(NN_pred, axis=1)

            except ValueError:
                print ("Problem with prediction. Showing the shapes here:")
                print (np.shape(NN_inputs))
                print (np.shape(weight_BL))
                NN_pred = np.array([])
                best_score = np.array([])
                NN_inputs_scaled = NN_inputs
                raise

            ##k.clear_session()

            #FIXME below needs to be fixed again with changed NN evaluation. Should work now
            fill_multiple_np(output['node'], {'multiplicity':best_score})
            fill_multiple_np(output['node0_score_incl'], {'score':NN_pred[:,0]})
            fill_multiple_np(output['node1_score_incl'], {'score':NN_pred[:,1]})
            fill_multiple_np(output['node2_score_incl'], {'score':NN_pred[:,2]})
            fill_multiple_np(output['node3_score_incl'], {'score':NN_pred[:,3]})
            fill_multiple_np(output['node4_score_incl'], {'score':NN_pred[:,4]})

            fill_multiple_np(output['node0_score'], {'score':NN_pred[:,0]}, add_sel=(best_score==0))
            fill_multiple_np(output['node1_score'], {'score':NN_pred[:,1]}, add_sel=(best_score==1))
            fill_multiple_np(output['node2_score'], {'score':NN_pred[:,2]}, add_sel=(best_score==2))
            fill_multiple_np(output['node3_score'], {'score':NN_pred[:,3]}, add_sel=(best_score==3))
            fill_multiple_np(output['node4_score'], {'score':NN_pred[:,4]}, add_sel=(best_score==4))

            #SR_sel_pp = ((best_score==0) & ak.flatten((leading_lepton[BL].pdgId<0)))
            #SR_sel_mm = ((best_score==0) & ak.flatten((leading_lepton[BL].pdgId>0)))
            #leading_lepton_BL = leading_lepton[BL]

            #output['lead_lep_SR_pp'].fill(
            #    dataset = dataset,
            #    pt  = ak.to_numpy(ak.flatten(leading_lepton_BL[SR_sel_pp].pt)),
            #    weight = weight_BL[SR_sel_pp]
            #)

            #output['lead_lep_SR_mm'].fill(
            #    dataset = dataset,
            #    pt  = ak.to_numpy(ak.flatten(leading_lepton_BL[SR_sel_mm].pt)),
            #    weight = weight_BL[SR_sel_mm]
            #)

            del model
            del scaler
            del NN_inputs, NN_inputs_scaled, NN_pred

    labels = {'topW_v3': 0, 'TTW':1, 'TTZ': 2, 'TTH': 3, 'ttbar': 4, 'rare':5, 'diboson':6}  # these should be all?
    if dataset in labels:
        label_mult = labels[dataset]
    else:
        label_mult = 7  # data or anything else

    if self.dump:
        output['label']     += processor.column_accumulator(np.ones(len(ev[out_sel])) * label_mult)
        output['SS']        += processor.column_accumulator(ak.to_numpy(BL[out_sel]))
        output['OS']        += processor.column_accumulator(ak.to_numpy(cf_est_sel_mc[out_sel]))
        output['AR']        += processor.column_accumulator(ak.to_numpy(np_est_sel_mc[out_sel]))
        output['LL']        += processor.column_accumulator(ak.to_numpy(LL[out_sel]))
        output['weight']    += processor.column_accumulator(ak.to_numpy(weight.weight()[out_sel]))
        output['weight_np'] += processor.column_accumulator(ak.to_numpy(weight_np_mc[out_sel]))
        output['weight_cf'] += processor.column_accumulator(ak.to_numpy(weight_cf_mc[out_sel]))

    # first, make a few super inclusive plots
    output['PV_npvs'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvs, weight=weight_BL)
    output['PV_npvsGood'].fill(dataset=dataset, multiplicity=ev.PV[BL].npvsGood, weight=weight_BL)
    fill_multiple_np(output['N_jet'],     {'multiplicity': ak.num(jet)})
    fill_multiple_np(output['N_b'],       {'multiplicity': ak.num(btag)})
    fill_multiple_np(output['N_central'], {'multiplicity': ak.num(central)})
    fill_multiple_np(output['N_ele'],     {'multiplicity':ak.num(electron)})
    fill_multiple_np(output['N_mu'],      {'multiplicity':ak.num(muon)})
    fill_multiple_np(output['N_fwd'],     {'multiplicity':ak.num(fwd)})
    fill_multiple_np(output['ST'],        {'ht': st})
    fill_multiple_np(output['HT'],        {'ht': ht})

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        output['nLepFromTop'].fill(dataset=dataset, multiplicity=ev[BL].nLepFromTop, weight=weight_BL)
        output['nLepFromTau'].fill(dataset=dataset, multiplicity=ev.nLepFromTau[BL], weight=weight_BL)
        output['nLepFromZ'].fill(dataset=dataset, multiplicity=ev.nLepFromZ[BL], weight=weight_BL)
        output['nLepFromW'].fill(dataset=dataset, multiplicity=ev.nLepFromW[BL], weight=weight_BL)
        output['nGenTau'].fill(dataset=dataset, multiplicity=ev.nGenTau[BL], weight=weight_BL)
        output['nGenL'].fill(dataset=dataset, multiplicity=ak.num(ev.GenL[BL], axis=1), weight=weight_BL)
        output['chargeFlip_vs_nonprompt'].fill(dataset=dataset, n1=n_chargeflip[BL], n2=n_nonprompt[BL], n_ele=ak.num(electron)[BL], weight=weight_BL)

    fill_multiple_np(output['MET'], {'pt':ev.MET.pt, 'phi':ev.MET.phi})

    if not re.search(re.compile('MuonEG|DoubleMuon|DoubleEG|EGamma'), dataset):
        output['lead_gen_lep'].fill(
            dataset = dataset,
            pt  = ak.to_numpy(ak.flatten(leading_gen_lep[BL].pt)),
            eta = ak.to_numpy(ak.flatten(leading_gen_lep[BL].eta)),
            phi = ak.to_numpy(ak.flatten(leading_gen_lep[BL].phi)),
            weight = weight_BL
        )

        output['trail_gen_lep'].fill(
            dataset = dataset,
            pt  = ak.to_numpy(ak.flatten(trailing_gen_lep[BL].pt)),
            eta = ak.to_numpy(ak.flatten(trailing_gen_lep[BL].eta)),
            phi = ak.to_numpy(ak.flatten(trailing_gen_lep[BL].phi)),
            weight = weight_BL
        )

    fill_multiple_np(
        output['lead_lep'],
        {
            'pt':  pad_and_flatten(leading_lepton.p4.pt),
            'eta': pad_and_flatten(leading_lepton.eta),
            'phi': pad_and_flatten(leading_lepton.phi),
        },
    )

    fill_multiple_np(
        output['trail_lep'],
        {
            'pt':  pad_and_flatten(trailing_lepton.p4.pt),
            'eta': pad_and_flatten(trailing_lepton.eta),
            'phi': pad_and_flatten(trailing_lepton.phi),
        },
    )

    output['j1'].fill(
        dataset = dataset,
        pt  = ak.flatten(jet.pt_nom[:, 0:1][BL]),
        eta = ak.flatten(jet.eta[:, 0:1][BL]),
        phi = ak.flatten(jet.phi[:, 0:1][BL]),
        weight = weight_BL
    )

    output['j2'].fill(
        dataset = dataset,
        pt  = ak.flatten(jet[:, 1:2][BL].pt_nom),
        eta = ak.flatten(jet[:, 1:2][BL].eta),
        phi = ak.flatten(jet[:, 1:2][BL].phi),
        weight = weight_BL
    )

    output['j3'].fill(
        dataset = dataset,
        pt  = ak.flatten(jet[:, 2:3][BL].pt_nom),
        eta = ak.flatten(jet[:, 2:3][BL].eta),
        phi = ak.flatten(jet[:, 2:3][BL].phi),
        weight = weight_BL
    )

    fill_multiple_np(
        output['fwd_jet'],
        {
            'pt':  pad_and_flatten(best_fwd.pt),
            'eta': pad_and_flatten(best_fwd.eta),
            'phi': pad_and_flatten(best_fwd.phi),
        },
    )

    #output['fwd_jet'].fill(
    #    dataset = dataset,
    #    pt  = ak.flatten(j_fwd[BL].pt),
    #    eta = ak.flatten(j_fwd[BL].eta),
    #    phi = ak.flatten(j_fwd[BL].phi),
    #    weight = weight_BL
    #)

    output['high_p_fwd_p'].fill(dataset=dataset, p = ak.flatten(best_fwd[BL].p), weight = weight_BL)

    return output
def get(self, ele, mu, variation='central'):
    """Return the per-event lepton scale factor for the configured year.

    Parameters
    ----------
    ele, mu : jagged electron / muon collections; electrons are looked up
        in |eta + deltaEtaSC| (supercluster eta), muons in |eta|.
    variation : 'central', 'up' or 'down'. Only implemented for 2018,
        where the tight-ID SF is scaled by (or divided by) the larger of
        its eta- and pt-binned uncertainties; ignored for 2016/2017.

    Returns
    -------
    One SF per event: the product over all selected leptons of the
    reco/loose/looseTTH/tight electron SFs and loose/tight muon SFs.

    Notes
    -----
    For a year other than 2016/2017/2018, ``sf`` is never assigned and a
    NameError is raised at the return (same behavior as before).
    """
    if self.year in (2016, 2017):
        # 2016 and 2017 use identical logic; only the evaluator keys differ.
        year = self.year
        # reco SFs come split into a high-pt (>20 GeV) and a low-pt map
        ele_sf_reco = self.evaluator["ele_%d_reco" % year](ele[ele.pt > 20].eta, ele[ele.pt > 20].pt)
        ele_sf_reco_low = self.evaluator["ele_%d_reco_low" % year](ele[ele.pt <= 20].eta, ele[ele.pt <= 20].pt)
        # ID maps are binned in supercluster |eta|
        ele_sf_loose = self.evaluator["ele_%d_loose" % year](abs(ele.eta + ele.deltaEtaSC), ele.pt)
        ele_sf_looseTTH = self.evaluator["ele_%d_looseTTH" % year](abs(ele.eta + ele.deltaEtaSC), ele.pt)
        ele_sf_tight = self.evaluator["ele_%d_tight" % year](abs(ele.eta + ele.deltaEtaSC), ele.pt)
        mu_sf_loose = self.evaluator["mu_%d_loose" % year](abs(mu.eta), mu.pt)
        mu_sf_tight = self.evaluator["mu_%d_tight" % year](abs(mu.eta), mu.pt)

        # reduce the per-lepton SFs to one factor per event
        sf = (
            ak.prod(ele_sf_reco, axis=1)
            * ak.prod(ele_sf_reco_low, axis=1)
            * ak.prod(ele_sf_loose, axis=1)
            * ak.prod(ele_sf_looseTTH, axis=1)
            * ak.prod(ele_sf_tight, axis=1)
            * ak.prod(mu_sf_loose, axis=1)
            * ak.prod(mu_sf_tight, axis=1)
        )

    elif self.year == 2018:
        # 2018 has a single reco map (no low-pt split) and supports
        # up/down variations of the tight-ID SFs.
        ele_sf_reco = self.evaluator["ele_2018_reco"](ele.eta, ele.pt)
        ele_sf_loose = self.evaluator["ele_2018_loose"](abs(ele.eta + ele.deltaEtaSC), ele.pt)
        ele_sf_looseTTH = self.evaluator["ele_2018_looseTTH"](abs(ele.eta + ele.deltaEtaSC), ele.pt)
        ele_sf_tight = self.evaluator["ele_2018_tight"](abs(ele.eta + ele.deltaEtaSC), ele.pt)
        mu_sf_loose = self.evaluator["mu_2018_loose"](abs(mu.eta), mu.pt)
        mu_sf_tight = self.evaluator["mu_2018_tight"](abs(mu.eta), mu.pt)

        if variation != 'central':
            # take, per lepton, the larger of the eta-binned and pt-binned
            # uncertainties (concatenate along a new axis, then max over it)
            ele_sf_tight_err1 = self.evaluator["ele_2018_tight_eta"](abs(ele.eta + ele.deltaEtaSC))
            ele_sf_tight_err2 = self.evaluator["ele_2018_tight_pt"](ele.pt)
            ele_sf_tight_err1 = ak.from_regular(ele_sf_tight_err1[:, :, np.newaxis])
            ele_sf_tight_err2 = ak.from_regular(ele_sf_tight_err2[:, :, np.newaxis])
            ele_sf_tight_err = ak.max(ak.concatenate([ele_sf_tight_err1, ele_sf_tight_err2], axis=2), axis=2)

            mu_sf_tight_err1 = self.evaluator["mu_2018_tight_eta"](abs(mu.eta))
            mu_sf_tight_err2 = self.evaluator["mu_2018_tight_pt"](mu.pt)
            mu_sf_tight_err1 = ak.from_regular(mu_sf_tight_err1[:, :, np.newaxis])
            mu_sf_tight_err2 = ak.from_regular(mu_sf_tight_err2[:, :, np.newaxis])
            mu_sf_tight_err = ak.max(ak.concatenate([mu_sf_tight_err1, mu_sf_tight_err2], axis=2), axis=2)

        if variation == 'up':
            ele_sf_tight = ele_sf_tight * ele_sf_tight_err
            mu_sf_tight = mu_sf_tight * mu_sf_tight_err
        elif variation == 'down':
            ele_sf_tight = ele_sf_tight / ele_sf_tight_err
            mu_sf_tight = mu_sf_tight / mu_sf_tight_err

        sf = (
            ak.prod(ele_sf_reco, axis=1)
            * ak.prod(ele_sf_loose, axis=1)
            * ak.prod(ele_sf_looseTTH, axis=1)
            * ak.prod(ele_sf_tight, axis=1)
            * ak.prod(mu_sf_loose, axis=1)
            * ak.prod(mu_sf_tight, axis=1)
        )

    return sf
def argon_kshell_cuts_3kV(events):
    """Apply the argon K-shell event selection for the 3 kV dataset.

    Applies cuts 4-11 (S2 amplitude, single-S2, S1 presence, S2>S1 area,
    S2 fraction-top, total area, drift time, S2 width, fiducial volume)
    and attaches ``drifttime_musec``, ``z`` and ``r_s2`` to the surviving
    events.

    Parameters
    ----------
    events : awkward Array with ``s1``/``s2`` pulse records and
        ``s2s_per_waveform``.

    Returns
    -------
    The filtered events array, or an empty ``ak.Array`` if no events
    survive the intermediate cuts.
    """
    #4) largest_s2_amplitude_pmt < 11600
    events = events[ak.all(events.s2.height_mvdc_bot < 11600, axis=1)]
    #5) second_largest_s2_area_pmt = 0 (exactly one S2 in the waveform)
    events = events[events.s2s_per_waveform == 1]
    #6) largest_s1_area_pmt > 0
    events = events[ak.any(events.s1.area_pe_bot > 0, axis=1)]
    #7) largest_s2_area_pmt > largest_s1_area_pmt
    # BUG FIX: this mask used to be assigned to a new variable `event`
    # (singular) which was never used, so the cut was silently skipped.
    events = events[ak.max(events.s2.area_pe_bot, axis=1) > ak.max(events.s1.area_pe_bot, axis=1)]
    #8) 0.16 < s2_frt < 0.34 (comment previously said 0.20; the code uses 0.16)
    events["s2", "s2_frt"] = events.s2.area_pe_top / (events.s2.area_pe_top + events.s2.area_pe_bot)
    mask = ak.any(events.s2.s2_frt > 0.16, axis=1) & ak.any(events.s2.s2_frt < 0.34, axis=1)
    events = events[mask]
    #9) s2_area + s1_area > 1175
    mask = ak.max(events.s2.area_pe_top, axis=1) + ak.max(events.s2.area_pe_bot, axis=1) + \
        ak.max(events.s1.area_pe_top, axis=1) + ak.max(events.s1.area_pe_bot, axis=1) > 1175
    events = events[mask]

    if ak.num(events, axis=0) == 0:
        return ak.Array([])

    # Add the drift time
    mask_s1_before_s2 = events.s1.pos_bot < ak.flatten(events.s2.pos_bot)
    # Cut away the events which don't have an S1 before the S2; these are S2-only.
    events = events[ak.any(events.s1.area_pe_bot[mask_s1_before_s2], axis=1)]

    if ak.num(events, axis=0) == 0:
        return ak.Array([])

    # recompute on the filtered events and pick the largest S1 before the S2
    mask_s1_before_s2 = events.s1.pos_bot < ak.flatten(events.s2.pos_bot)
    max_s1_before_s2 = ak.argmax(events.s1.area_pe_bot[mask_s1_before_s2], axis=1, keepdims=True)
    events["mask_max_s1_before_s2"] = max_s1_before_s2
    events["drifttime_musec"] = ak.flatten(
        (ak.max(events.s2.pos_bot, axis=1) - events.s1.pos_bot[max_s1_before_s2]) / 100)

    #10) S2 width cut: drift-time dependent band
    lower_width = 34.5 + 0.22 * events["drifttime_musec"] + np.sqrt(32.6 * events["drifttime_musec"])
    upper_width = 48.2 + 0.19 * events["drifttime_musec"] + np.sqrt(47.3 * events["drifttime_musec"])
    upper_mask = ak.any(events.s2.width_bot < upper_width, axis=1)
    lower_mask = ak.any(events.s2.width_bot > lower_width, axis=1)
    events = events[upper_mask & lower_mask]

    #11) FDV (fiducial volume) cut
    v = 1.9608556663165941
    gate_time = 1.537109944449019
    events["z"] = -v * (events["drifttime_musec"] - gate_time)
    events = events[events.z < -2]
    events = events[events.z > -28]
    events["r_s2"] = ak.flatten(np.sqrt(np.square(events.s2.x_corr) + np.square(events.s2.y_corr)))
    events = events[events.r_s2 < 10]

    return events
def process(self, events):
    """Boosted-Hbb event selection: pick one candidate fat jet per event,
    build signal / muon-control selections, and fill cutflow, template
    and control histograms (with weight systematics for MC).

    Parameters
    ----------
    events : NanoEvents chunk; real data is detected by the absence of a
        ``genWeight`` branch.

    Returns
    -------
    The processor accumulator with all histograms filled.
    """
    dataset = events.metadata['dataset']
    isRealData = not hasattr(events, "genWeight")
    selection = PackedSelection()
    weights = Weights(len(events))
    output = self.accumulator.identity()
    if not isRealData:
        output['sumw'][dataset] += ak.sum(events.genWeight)

    # analysis trigger: OR of all paths for the year; MC passes by construction
    if isRealData:
        trigger = np.zeros(len(events), dtype='bool')
        for t in self._triggers[self._year]:
            trigger = trigger | events.HLT[t]
    else:
        trigger = np.ones(len(events), dtype='bool')
    selection.add('trigger', trigger)

    # muon-control-region trigger, same construction
    if isRealData:
        trigger = np.zeros(len(events), dtype='bool')
        for t in self._muontriggers[self._year]:
            trigger = trigger | events.HLT[t]
    else:
        trigger = np.ones(len(events), dtype='bool')
    selection.add('muontrigger', trigger)

    fatjets = events.FatJet
    fatjets['msdcorr'] = corrected_msoftdrop(fatjets)
    fatjets['qcdrho'] = 2 * np.log(fatjets.msdcorr / fatjets.pt)
    fatjets['n2ddt'] = fatjets.n2b1 - n2ddt_shift(fatjets, year=self._year)
    fatjets['msdcorr_full'] = fatjets['msdcorr'] * self._msdSF[self._year]

    candidatejet = fatjets[
        # https://github.com/DAZSLE/BaconAnalyzer/blob/master/Analyzer/src/VJetLoader.cc#L269
        (fatjets.pt > 200)
        & (abs(fatjets.eta) < 2.5)
        & fatjets.isTight  # this is loose in sampleContainer
    ]
    # Reduce to exactly one candidate record per event.
    # FIX: the argmax/argmin arbitrations previously used ak.argmax's default
    # axis=None (one flat index for the whole chunk) and were not wrapped in
    # ak.firsts like the 'pt' mode, so they selected the wrong objects and
    # returned a different nesting depth. Use axis=1 with keepdims=True and
    # take the first (and only) entry per event.
    if self._jet_arbitration == 'pt':
        candidatejet = ak.firsts(candidatejet)
    elif self._jet_arbitration == 'mass':
        candidatejet = ak.firsts(candidatejet[ak.argmax(candidatejet.msdcorr, axis=1, keepdims=True)])
    elif self._jet_arbitration == 'n2':
        candidatejet = ak.firsts(candidatejet[ak.argmin(candidatejet.n2ddt, axis=1, keepdims=True)])
    elif self._jet_arbitration == 'ddb':
        candidatejet = ak.firsts(candidatejet[ak.argmax(candidatejet.btagDDBvL, axis=1, keepdims=True)])
    else:
        raise RuntimeError("Unknown candidate jet arbitration")

    selection.add('minjetkin',
                  (candidatejet.pt >= 450)
                  & (candidatejet.msdcorr >= 40.)
                  & (abs(candidatejet.eta) < 2.5))
    selection.add('jetacceptance',
                  (candidatejet.msdcorr >= 47.)
                  & (candidatejet.pt < 1200)
                  & (candidatejet.msdcorr < 201.))
    selection.add('jetid', candidatejet.isTight)
    selection.add('n2ddt', (candidatejet.n2ddt < 0.))
    selection.add('ddbpass', (candidatejet.btagDDBvL >= 0.89))

    jets = events.Jet[(events.Jet.pt > 30.)
                      & (abs(events.Jet.eta) < 2.5)
                      & events.Jet.isTight]
    # only consider first 4 jets to be consistent with old framework
    jets = jets[:, :4]
    dphi = abs(jets.delta_phi(candidatejet))
    selection.add(
        'antiak4btagMediumOppHem',
        ak.max(jets[dphi > np.pi / 2].btagDeepB, axis=1, mask_identity=False)
        < BTagEfficiency.btagWPs[self._year]['medium'])
    ak4_away = jets[dphi > 0.8]
    selection.add(
        'ak4btagMedium08',
        ak.max(ak4_away.btagDeepB, axis=1, mask_identity=False)
        > BTagEfficiency.btagWPs[self._year]['medium'])

    selection.add('met', events.MET.pt < 140.)

    goodmuon = ((events.Muon.pt > 10)
                & (abs(events.Muon.eta) < 2.4)
                & (events.Muon.pfRelIso04_all < 0.25)
                & events.Muon.looseId)
    nmuons = ak.sum(goodmuon, axis=1)
    leadingmuon = ak.firsts(events.Muon[goodmuon])
    nelectrons = ak.sum(
        (events.Electron.pt > 10)
        & (abs(events.Electron.eta) < 2.5)
        & (events.Electron.cutBased >= events.Electron.LOOSE),
        axis=1,
    )
    ntaus = ak.sum(
        (events.Tau.pt > 20)
        & events.Tau.idDecayMode,  # bacon iso looser than Nano selection
        axis=1,
    )
    selection.add('noleptons', (nmuons == 0) & (nelectrons == 0) & (ntaus == 0))
    selection.add('onemuon', (nmuons == 1) & (nelectrons == 0) & (ntaus == 0))
    selection.add('muonkin', (leadingmuon.pt > 55.) & (abs(leadingmuon.eta) < 2.1))
    selection.add('muonDphiAK8', abs(leadingmuon.delta_phi(candidatejet)) > 2 * np.pi / 3)

    if isRealData:
        # FIX: genflavor is indexed per event below (genflavor[cut]); the
        # scalar 0 used previously raises a TypeError when running on data.
        genflavor = np.zeros(len(events))
    else:
        weights.add('genweight', events.genWeight)
        add_pileup_weight(weights, events.Pileup.nPU, self._year, dataset)
        bosons = getBosons(events.GenPart)
        matchedBoson = candidatejet.nearest(bosons, axis=None, threshold=0.8)
        genflavor = bosonFlavor(matchedBoson)
        genBosonPt = ak.fill_none(ak.firsts(bosons.pt), 0)
        add_VJets_NLOkFactor(weights, genBosonPt, self._year, dataset)

    add_jetTriggerWeight(weights, candidatejet.msdcorr, candidatejet.pt, self._year)
    output['btagWeight'].fill(dataset=dataset,
                              val=self._btagSF.addBtagWeight(weights, ak4_away))
    logger.debug("Weight statistics: %r" % weights.weightStatistics)

    # corrected softdrop mass only for gen-matched (genflavor > 0) jets
    msd_matched = candidatejet.msdcorr * self._msdSF[self._year] * (
        genflavor > 0) + candidatejet.msdcorr * (genflavor == 0)

    regions = {
        'signal': [
            'trigger', 'minjetkin', 'jetacceptance', 'jetid', 'n2ddt',
            'antiak4btagMediumOppHem', 'met', 'noleptons'
        ],
        'muoncontrol': [
            'muontrigger', 'minjetkin', 'jetacceptance', 'jetid', 'n2ddt',
            'ak4btagMedium08', 'onemuon', 'muonkin', 'muonDphiAK8'
        ],
        'noselection': [],
    }

    for region, cuts in regions.items():
        allcuts = set()
        output['cutflow'].fill(dataset=dataset, region=region,
                               genflavor=genflavor, cut=0,
                               weight=weights.weight())
        # accumulate cuts one at a time; the mask is named cutmask so it no
        # longer shadows the loop variable `cut`
        for i, cut in enumerate(cuts + ['ddbpass']):
            allcuts.add(cut)
            cutmask = selection.all(*allcuts)
            output['cutflow'].fill(dataset=dataset, region=region,
                                   genflavor=genflavor[cutmask], cut=i + 1,
                                   weight=weights.weight()[cutmask])

    systematics = [
        None,
        'jet_triggerUp',
        'jet_triggerDown',
        'btagWeightUp',
        'btagWeightDown',
        'btagEffStatUp',
        'btagEffStatDown',
    ]

    def normalize(val, cut):
        # project out the selected events and make missing values hist-safe
        return ak.to_numpy(ak.fill_none(val[cut], np.nan))

    def fill(region, systematic, wmod=None):
        # fill the template (and, for explicit weight modifiers, the gen
        # response) histograms for one region/systematic combination
        selections = regions[region]
        cut = selection.all(*selections)
        sname = 'nominal' if systematic is None else systematic
        if wmod is None:
            weight = weights.weight(modifier=systematic)[cut]
        else:
            weight = weights.weight()[cut] * wmod[cut]

        output['templates'].fill(
            dataset=dataset,
            region=region,
            systematic=sname,
            genflavor=genflavor[cut],
            pt=normalize(candidatejet.pt, cut),
            msd=normalize(msd_matched, cut),
            ddb=normalize(candidatejet.btagDDBvL, cut),
            weight=weight,
        )
        if wmod is not None:
            output['genresponse_noweight'].fill(
                dataset=dataset,
                region=region,
                systematic=sname,
                pt=normalize(candidatejet.pt, cut),
                genpt=normalize(genBosonPt, cut),
                weight=events.genWeight[cut] * wmod[cut],
            )
            output['genresponse'].fill(
                dataset=dataset,
                region=region,
                systematic=sname,
                pt=normalize(candidatejet.pt, cut),
                genpt=normalize(genBosonPt, cut),
                weight=weight,
            )

    for region in regions:
        # N-1 plot for the n2ddt cut
        cut = selection.all(*(set(regions[region]) - {'n2ddt'}))
        output['nminus1_n2ddt'].fill(
            dataset=dataset,
            region=region,
            n2ddt=normalize(candidatejet.n2ddt, cut),
            weight=weights.weight()[cut],
        )
        for systematic in systematics:
            fill(region, systematic)
        if 'GluGluHToBB' in dataset:
            for i in range(9):
                fill(region, 'LHEScale_%d' % i, events.LHEScaleWeight[:, i])
            for c in events.LHEWeight.columns[1:]:
                fill(region, 'LHEWeight_%s' % c, events.LHEWeight[c])

    output["weightStats"] = weights.weightStatistics
    return output