def write_events(self, mass, coupling, energy, filename=None, numberevent=10, zfront=0, seed=None):

    # set random seed
    random.seed(seed)

    # get weighted sample of LLPs
    _, _, _, energies, weights, thetas = self.get_events(mass=mass, energy=energy, couplings=[coupling])
    weighted_raw_data = np.array([energies[0], thetas[0]]).T

    # unweight sample
    unweighted_raw_data = random.choices(weighted_raw_data, weights=weights[0], k=numberevent)
    eventweight = sum(weights[0])/float(numberevent)

    # setup decay channels
    modes = self.model.br_functions.keys()
    branchings = [float(self.model.get_br(mode, mass, coupling)) for mode in modes]
    finalstates = [self.model.br_finalstate[mode] for mode in modes]
    channels = [[fs, br] for mode, br, fs in zip(modes, branchings, finalstates)]
    br_other = 1 - sum(branchings)
    if br_other > 0: channels.append([None, br_other])
    channels = np.array(channels).T

    # get LLP momenta and decay location
    unweighted_data = []
    for en, theta in unweighted_raw_data:
        # momentum
        phi = random.uniform(-math.pi, math.pi)
        mom = math.sqrt(en**2 - mass**2)
        pz, pt = mom*np.cos(theta), mom*np.sin(theta)
        px, py = pt*np.cos(phi), pt*np.sin(phi)
        momentum = LorentzVector(px, py, pz, en)
        # position
        posx = theta*self.distance*np.cos(phi)
        posy = theta*self.distance*np.sin(phi)
        posz = random.uniform(0, self.length)
        post = 3.0e8 * np.sqrt(posx**2 + posy**2 + posz**2)
        position = LorentzVector(posx, posy, posz, post)
        # determine choice of final state
        pids = random.choices(channels[0], weights=channels[1], k=1)[0]
        pids, finalstate = self.decay_llp(momentum, pids)
        # save
        unweighted_data.append([eventweight, position, momentum, pids, finalstate])

    # set output filename
    dirname = self.model.modelpath + "model/events/"
    if not os.path.exists(dirname): os.mkdir(dirname)
    if filename is None: filename = dirname + str(mass) + "_" + str(coupling) + ".hepmc"
    else: filename = self.model.modelpath + filename

    # write to HEPMC file
    self.write_hepmc_file(filename=filename, data=unweighted_data, zfront=zfront)
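# The unweighting step above draws `numberevent` entries from the weighted sample and then
# assigns every drawn event the same average weight, so the summed weight is preserved.
# A minimal standalone sketch of that idea, using only the stdlib and made-up toy numbers:
import random

toy_values = [1.0, 2.0, 3.0, 4.0]      # hypothetical observable values
toy_weights = [0.1, 0.4, 0.4, 0.1]     # hypothetical event weights
k = 10

# draw k events with probability proportional to their weight ...
unweighted_sample = random.choices(toy_values, weights=toy_weights, k=k)
# ... and give every drawn event the same weight, preserving the total
eventweight = sum(toy_weights) / float(k)
print(unweighted_sample, eventweight)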
def twobody_decay(self, p0, m0, m1, m2, phi, costheta):
    """
    function that decays p0 > p1 p2 and returns p1, p2
    """

    # get axis of p0
    zaxis = Vector3D(0, 0, 1)
    rotaxis = zaxis.cross(p0.vector).unit()
    rotangle = zaxis.angle(p0.vector)

    # energy and momentum of p1 and p2 in the rest frame of p0
    energy1 = (m0*m0 + m1*m1 - m2*m2)/(2.*m0)
    energy2 = (m0*m0 - m1*m1 + m2*m2)/(2.*m0)
    momentum1 = math.sqrt(energy1*energy1 - m1*m1)
    momentum2 = math.sqrt(energy2*energy2 - m2*m2)

    # 4-momentum of p1 and p2 in the rest frame of p0
    en1 = energy1
    pz1 = momentum1 * costheta
    py1 = momentum1 * math.sqrt(1. - costheta*costheta) * np.sin(phi)
    px1 = momentum1 * math.sqrt(1. - costheta*costheta) * np.cos(phi)
    p1 = LorentzVector(-px1, -py1, -pz1, en1)
    if rotangle != 0: p1 = p1.rotate(rotangle, rotaxis)

    en2 = energy2
    pz2 = momentum2 * costheta
    py2 = momentum2 * math.sqrt(1. - costheta*costheta) * np.sin(phi)
    px2 = momentum2 * math.sqrt(1. - costheta*costheta) * np.cos(phi)
    p2 = LorentzVector(px2, py2, pz2, en2)
    if rotangle != 0: p2 = p2.rotate(rotangle, rotaxis)

    # boost p1 and p2 from the p0 rest frame into the frame of p0
    p1_ = p1.boost(-1.*p0.boostvector)
    p2_ = p2.boost(-1.*p0.boostvector)
    return p1_, p2_
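# Quick numerical check of the rest-frame formulas used above: the two daughter energies
# must add up to the parent mass and the back-to-back momenta must have equal magnitude.
# Pure arithmetic with hypothetical masses (GeV), independent of any vector library:
import math

m0, m1, m2 = 0.494, 0.140, 0.105   # toy parent and daughter masses

energy1 = (m0*m0 + m1*m1 - m2*m2)/(2.*m0)
energy2 = (m0*m0 - m1*m1 + m2*m2)/(2.*m0)
momentum1 = math.sqrt(energy1*energy1 - m1*m1)
momentum2 = math.sqrt(energy2*energy2 - m2*m2)

assert abs(energy1 + energy2 - m0) < 1e-12     # energies sum to the parent mass
assert abs(momentum1 - momentum2) < 1e-12      # equal and opposite momenta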
def MM2_calculator(df, B_M_nominal=5279.5):
    """Main function to calculate the COM values: takes a DataFrame as input and returns the
    same DataFrame with added columns for the COM values."""
    # set up flight related vectors
    df['pv_vector'] = df.apply(lambda x: array([x.TwoBody_OWNPV_X, x.TwoBody_OWNPV_Y, x.TwoBody_OWNPV_Z]), axis=1)
    df['sv_vector'] = df.apply(lambda x: array([x.TwoBody_ENDVERTEX_X, x.TwoBody_ENDVERTEX_Y, x.TwoBody_ENDVERTEX_Z]), axis=1)
    df['flight'] = df.apply(lambda x: x.sv_vector - x.pv_vector, axis=1)
    df['tan_theta'] = df.apply(lambda x: mag(perp(x.flight) / x.flight[-1]), axis=1)
    # this sets the 4-momentum of the B meson from the kinematic info of the TwoBody vertex
    df['p4B'] = df.apply(lambda x: LorentzVector(x.TwoBody_PX, x.TwoBody_PY, x.TwoBody_PZ, x.TwoBody_PE), axis=1)
    # PT estimate based on reconstructed mass and flight vector
    df['pt_est'] = df.apply(lambda x: (B_M_nominal / x.TwoBody_M) * x.tan_theta * x.TwoBody_PZ, axis=1)
    # calculating the eta and phi of the flight vector
    df['flight_eta'] = df.apply(lambda x: eta(Vector3D(x.flight[0], x.flight[1], x.flight[2]).unit()), axis=1)
    df['flight_phi'] = df.apply(lambda x: Vector3D(x.flight[0], x.flight[1], x.flight[2]).unit().phi(), axis=1)
    # estimated B candidate for this estimated momentum, measured flight direction and expected true B mass
    df['p4B_est'] = df.apply(lambda x: my_SetPtEtaPhiM(x.pt_est, x.flight_eta, x.flight_phi, B_M_nominal), axis=1)
    # estimating the boost needed to get to the B's rest frame
    df['boost_est'] = df.apply(lambda x: boostvector(x.p4B_est), axis=1)
    # calculating the missing mass^2 - this can go negative with resolution
    df['mm2'] = df.apply(lambda x: (x.p4B_est - x.p4B).mass2, axis=1)
    return df
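# The `mm2` column above is simply the invariant mass squared of the difference between the
# estimated B four-vector and the visible (reconstructed) four-vector. A minimal sketch of
# that final step in isolation, assuming the scikit-hep LorentzVector class used throughout
# this code and purely illustrative momenta (MeV):
from skhep.math.vectors import LorentzVector

p4B_est = LorentzVector(1000., 2000., 50000., 50320.)   # hypothetical estimated B momentum
p4B_vis = LorentzVector(800., 1500., 30000., 30100.)    # hypothetical visible two-body momentum

# missing mass squared; detector resolution can push this negative
mm2 = (p4B_est - p4B_vis).mass2
print(mm2)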
def setpxpypzm(px, py, pz, m):
    """Creates a Lorentz four-momentum vector using the 3-momentum and mass as inputs.
    I have to define this myself rather than use the function in the skhep library, as that one
    doesn't return the four-vector but updates the parameters in place, which messes up the
    .apply method."""
    self = LorentzVector()
    self.x = px
    self.y = py
    self.z = pz
    if m > 0.:
        self.t = sqrt(px ** 2 + py ** 2 + pz ** 2 + m ** 2)
    else:
        self.t = sqrt(px ** 2 + py ** 2 + pz ** 2 - m ** 2)
    return self
def my_SetPtEtaPhiM(pt, eta, phi, m):
    """Create a Lorentz 4-momentum vector defined from the transverse momentum, the
    pseudorapidity, the angle phi and the mass."""
    px, py, pz = pt * cos(phi), pt * sin(phi), pt * sinh(eta)
    self = LorentzVector()
    self.x = px
    self.y = py
    self.z = pz
    if m > 0.:
        self.t = sqrt(px ** 2 + py ** 2 + pz ** 2 + m ** 2)
    else:
        self.t = sqrt(px ** 2 + py ** 2 + pz ** 2 - m ** 2)
    return self
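# A short usage sketch of the two constructors above, checking that the requested mass and
# transverse momentum are reproduced. Toy values only; this assumes the .mass and .pt
# properties of the scikit-hep LorentzVector already used elsewhere in this code:
from math import isclose

v1 = setpxpypzm(1.0, 2.0, 3.0, 0.105)           # from cartesian momentum and mass
v2 = my_SetPtEtaPhiM(50.0, 1.2, 0.3, 5279.5)    # from pt, eta, phi and mass

assert isclose(v1.mass, 0.105, rel_tol=1e-6)
assert isclose(v2.pt, 50.0, rel_tol=1e-6)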
def decay_in_restframe_2body(self, br, m0, m1, m2, nsample):

    # prepare output
    particles, weights = [], []

    # create parent 4-vector
    p_mother = LorentzVector(0, 0, 0, m0)

    # MC sampling of angles
    for i in range(nsample):
        cos = random.uniform(-1., 1.)
        phi = random.uniform(-math.pi, math.pi)
        p_1, p_2 = self.twobody_decay(p_mother, m0, m1, m2, phi, cos)
        particles.append(p_2)
        weights.append(br/nsample)

    return particles, weights
def convert_list_to_momenta(self, filename, mass, filetype="txt", nsample=1, preselectioncut=None, nocuts=False):

    if filetype == "txt":
        list_logth, list_logp, list_xs = self.readfile(filename).T
    elif filetype == "npy":
        list_logth, list_logp, list_xs = np.load(filename)
    else:
        print("ERROR: cannot read file type")

    particles = []
    weights = []

    for logth, logp, xs in zip(list_logth, list_logp, list_xs):

        if nocuts == False and xs < 10.**-6: continue
        p = 10.**logp
        th = 10.**logth
        # the preselection cut is eval'ed with the local variables (e.g. p, th) in scope
        if nocuts == False and preselectioncut is not None:
            if not eval(preselectioncut): continue

        for n in range(nsample):
            phi = random.uniform(-math.pi, math.pi)
            if nsample == 1:
                fth, fp = 1, 1
            else:
                fth = np.random.normal(1, 0.05, 1)[0]
                fp = np.random.normal(1, 0.05, 1)[0]
            th_sm = th*fth
            p_sm = p*fp
            en = math.sqrt(p_sm**2 + mass**2)
            pz = p_sm*np.cos(th_sm)
            pt = p_sm*np.sin(th_sm)
            px = pt*np.cos(phi)
            py = pt*np.sin(phi)
            part = LorentzVector(px, py, pz, en)
            particles.append(part)
            weights.append(xs/float(nsample))

    return particles, weights
def decay_in_restframe_3body(self, br, coupling, m0, m1, m2, m3, nsample):

    # prepare output
    particles, weights = [], []

    # create parent 4-vector
    p_mother = LorentzVector(0, 0, 0, m0)

    # integration boundary
    q2min, q2max = (m2+m3)**2, (m0-m1)**2
    cthmin, cthmax = -1, 1
    mass = m2

    # numerical integration
    integral = 0
    for i in range(nsample):

        # get kinematic variables (these locals can be referenced inside the eval'ed 'br' string)
        q2 = random.uniform(q2min, q2max)
        cth = random.uniform(-1, 1)
        th = np.arccos(cth)
        q = math.sqrt(q2)

        # decay meson and V
        cosQ = cth
        phiQ = random.uniform(-math.pi, math.pi)
        cosM = random.uniform(-1., 1.)
        phiM = random.uniform(-math.pi, math.pi)
        p_1, p_q = self.twobody_decay(p_mother, m0, m1, q, phiM, cosM)
        p_2, p_3 = self.twobody_decay(p_q, q, m2, m3, phiQ, cosQ)

        # branching fraction
        brval = eval(br)
        brval *= (q2max-q2min)*(cthmax-cthmin)/float(nsample)

        # save
        particles.append(p_3)
        weights.append(brval)

    return particles, weights
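# The per-sample weight above is the standard flat-sampling Monte Carlo estimator:
# integrand value times the sampled phase-space volume divided by the number of samples.
# A self-contained toy with the same normalisation, integrating f(x, y) = x*y**2 over a
# rectangle (hypothetical bounds; exact answer is 4/3):
import random

random.seed(0)
xmin, xmax = 0.0, 2.0
ymin, ymax = -1.0, 1.0
nsample = 100000

integral = 0.0
for _ in range(nsample):
    x = random.uniform(xmin, xmax)
    y = random.uniform(ymin, ymax)
    # each sample contributes f * (box volume) / nsample
    integral += x * y**2 * (xmax - xmin) * (ymax - ymin) / float(nsample)

print(integral)   # should be close to 1.333...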
len(Xplot[Xplot[:, :, 2] < 0.00012])

plt.hist(gen_out[gen_out[:, :, 2] < 0.0001][:, 0], bins[0], histtype='step', label='Real', color='red')

real_masses = []
real_pt = []
gen_masses = []
gen_pt = []

for i in tqdm(range(num_samples)):
    jetv = LorentzVector()
    for part in Xplot[i]:
        vec = LorentzVector()
        vec.setptetaphim(part[2], part[0], part[1], 0)
        jetv += vec
    real_masses.append(jetv.mass)
    real_pt.append(jetv.pt)

for i in tqdm(range(num_samples)):
    jetv = LorentzVector()
    for part in gen_out[i]:
        vec = LorentzVector()
        if part[2] >= 0.0001:
            # mirror the loop above, but skip generated particles with negligible pT
            vec.setptetaphim(part[2], part[0], part[1], 0)
            jetv += vec
    gen_masses.append(jetv.mass)
    gen_pt.append(jetv.pt)
def calc_w1(args, X, G, dist, losses, X_loaded=None):
    print("evaluating 1-WD")

    num_batches = np.array(100000 / np.array(args.w1_num_samples), dtype=int)
    # num_batches = [5, 5, 5]

    G.eval()
    N = len(X)

    for k in range(len(args.w1_num_samples)):
        print("Num Samples: " + str(args.w1_num_samples[k]))
        w1s = []
        if args.jf: w1js = []

        for j in tqdm(range(num_batches[k])):
            gen_out = utils.gen(args, G, dist=dist, num_samples=args.batch_size, X_loaded=X_loaded).cpu().detach().numpy()
            for i in range(int(args.w1_num_samples[k] / args.batch_size)):
                gen_out = np.concatenate((gen_out, utils.gen(args, G, dist=dist, num_samples=args.batch_size, X_loaded=X_loaded).cpu().detach().numpy()), 0)
            gen_out = gen_out[:args.w1_num_samples[k]]

            sample = X[rng.choice(N, size=args.w1_num_samples[k])].cpu().detach().numpy()

            w1 = []
            for i in range(3):
                w1.append(wasserstein_distance(sample[:, :, i].reshape(-1), gen_out[:, :, i].reshape(-1)))
            w1s.append(w1)

            if args.jf:
                realj = []
                genj = []
                for i in range(args.w1_num_samples[k]):
                    jetv = LorentzVector()
                    for part in sample[i]:
                        vec = LorentzVector()
                        vec.setptetaphim(part[2], part[0], part[1], 0)
                        jetv += vec
                    realj.append([jetv.mass, jetv.pt])
                for i in range(args.w1_num_samples[k]):
                    jetv = LorentzVector()
                    for part in gen_out[i]:
                        vec = LorentzVector()
                        vec.setptetaphim(part[2], part[0], part[1], 0)
                        jetv += vec
                    genj.append([jetv.mass, jetv.pt])

                w1j = []
                for i in range(len(args.jet_features)):
                    w1j.append(wasserstein_distance(np.array(realj)[:, i], np.array(genj)[:, i]))
                w1js.append(w1j)

        losses['w1_' + str(args.w1_num_samples[k]) + 'm'].append(np.mean(np.array(w1s), axis=0))
        losses['w1_' + str(args.w1_num_samples[k]) + 'std'].append(np.std(np.array(w1s), axis=0))

        if args.jf:
            losses['w1j_' + str(args.w1_num_samples[k]) + 'm'].append(np.mean(np.array(w1js), axis=0))
            losses['w1j_' + str(args.w1_num_samples[k]) + 'std'].append(np.std(np.array(w1js), axis=0))
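# For reference, scipy.stats.wasserstein_distance as used above compares two 1-D empirical
# distributions (here: a particle feature flattened across all jets). A tiny standalone
# example with toy Gaussian samples:
import numpy as np
from scipy.stats import wasserstein_distance

toy_rng = np.random.default_rng(0)
real_feature = toy_rng.normal(0.0, 1.0, 10000)   # hypothetical "real" feature values
gen_feature = toy_rng.normal(0.1, 1.1, 10000)    # hypothetical "generated" feature values

print(wasserstein_distance(real_feature, gen_feature))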
def save_sample_outputs(args, D, G, X, dist, name, epoch, losses, X_loaded=None):
    print("drawing figs")
    plt.rcParams.update({'font.size': 16})
    plt.style.use(hep.style.CMS)
    # if(args.fid): plt.suptitle("FID: " + str(losses['fid'][-1]))
    # noise = torch.load(args.noise_path + args.noise_file_name).to(args.device)

    G.eval()
    gen_out = utils.gen(args, G, dist=dist, num_samples=args.batch_size, X_loaded=X_loaded).cpu().detach().numpy()
    for i in range(int(args.num_samples / args.batch_size)):
        gen_out = np.concatenate((gen_out, utils.gen(args, G, dist=dist, num_samples=args.batch_size, X_loaded=X_loaded).cpu().detach().numpy()), 0)
    gen_out = gen_out[:args.num_samples]

    if args.coords == 'cartesian':
        labels = ['$p_x$ (GeV)', '$p_y$ (GeV)', '$p_z$ (GeV)']
        bin = np.arange(-500, 500, 10)
        bins = [bin, bin, bin]
    elif args.coords == 'polarrel':
        labels = ['$\eta^{rel}$', '$\phi^{rel}$', '$p_T^{rel}$']
        if args.jets == 'g':
            bins = [np.arange(-0.3, 0.3, 0.005), np.arange(-0.3, 0.3, 0.005), np.arange(0, 0.2, 0.002)]
        elif args.jets == 't':
            bins = [np.arange(-0.5, 0.5, 0.005), np.arange(-0.5, 0.5, 0.005), np.arange(0, 0.2, 0.002)]
    elif args.coords == 'polarrelabspt':
        labels = ['$\eta^{rel}$', '$\phi^{rel}$', '$p_T$ (GeV)']
        bins = [np.arange(-0.5, 0.5, 0.01), np.arange(-0.5, 0.5, 0.01), np.arange(0, 400, 4)]

    labelsj = ['mass (GeV)', '$p_T$ (GeV)']

    # print(X)
    # print(X.shape)

    if args.coords == 'cartesian':
        Xplot = X.cpu().detach().numpy() * args.maxp / args.norm
        gen_out = gen_out * args.maxp / args.norm
    else:
        Xplot = X.cpu().detach().numpy()
        Xplot = Xplot / args.norm
        Xplot[:, :, 2] += 0.5
        Xplot *= args.maxepp
        gen_out = gen_out / args.norm
        gen_out[:, :, 2] += 0.5
        gen_out *= args.maxepp
        for i in range(args.num_samples):
            for j in range(args.num_hits):
                if gen_out[i][j][2] < 0:
                    gen_out[i][j][2] = 0

    print(Xplot.shape)
    print(gen_out.shape)
    print(Xplot[0][:10])
    print(gen_out[0][:10])

    real_masses = []
    gen_masses = []
    for i in range(args.num_samples):
        jetv = LorentzVector()
        for part in Xplot[i]:
            vec = LorentzVector()
            vec.setptetaphim(part[2], part[0], part[1], 0)
            jetv += vec
        real_masses.append(jetv.mass)
    for i in range(args.num_samples):
        jetv = LorentzVector()
        for part in gen_out[i]:
            vec = LorentzVector()
            vec.setptetaphim(part[2], part[0], part[1], 0)
            jetv += vec
        gen_masses.append(jetv.mass)

    fig = plt.figure(figsize=(30, 8))
    for i in range(3):
        fig.add_subplot(1, 4, i + 1)
        plt.ticklabel_format(axis='y', scilimits=(0, 0), useMathText=True)
        _ = plt.hist(Xplot[:, :, i].reshape(-1), bins[i], histtype='step', label='Real', color='red')
        _ = plt.hist(gen_out[:, :, i].reshape(-1), bins[i], histtype='step', label='Generated', color='blue')
        plt.xlabel('Particle ' + labels[i])
        plt.ylabel('Number of Particles')
        # plt.title('JSD = ' + str(round(losses['jsdm'][-1][i], 3)) + ' ± ' + str(round(losses['jsdstd'][-1][i], 3)))
        plt.legend(loc=1, prop={'size': 18})

    binsm = np.arange(0, 0.225, 0.0045)
    fig.add_subplot(1, 4, 4)
    plt.ticklabel_format(axis='y', scilimits=(0, 0), useMathText=True)
    # plt.ticklabel_format(axis='x', scilimits=(0, 0), useMathText=True)
    _ = plt.hist(real_masses, bins=binsm, histtype='step', label='Real', color='red')
    _ = plt.hist(gen_masses, bins=binsm, histtype='step', label='Generated', color='blue')
    plt.xlabel('Jet $m/p_{T}$')
    plt.ylabel('Jets')
    plt.legend(loc=1, prop={'size': 18})

    name = args.name + "/" + str(epoch)
    plt.tight_layout(pad=2.0)
    plt.savefig(args.figs_path + name + ".pdf", bbox_inches='tight')
    plt.close()

    plt.figure()
    if (args.loss == "og" or args.loss == "ls"):
        plt.plot(losses['Dr'], label='Discriminative real loss')
        plt.plot(losses['Df'], label='Discriminative fake loss')
        plt.plot(losses['G'], label='Generative loss')
    elif (args.loss == 'w'):
        plt.plot(losses['D'], label='Critic loss')
    elif (args.loss == 'hinge'):
        plt.plot(losses['Dr'], label='Discriminative real loss')
        plt.plot(losses['Df'], label='Discriminative fake loss')
        plt.plot(losses['G'], label='Generative loss')
    # plt.plot(losses['D'], label='Discriminative total loss')
    if (args.gp):
        plt.plot(losses['gp'], label='Gradient penalty')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig(args.losses_path + name + ".pdf", bbox_inches='tight')
    plt.close()

    if args.jf:
        real_masses = []
        real_pts = []
        gen_masses = []
        gen_pts = []
        for i in range(args.num_samples):
            jetv = LorentzVector()
            for part in Xplot[i]:
                vec = LorentzVector()
                vec.setptetaphim(part[2], part[0], part[1], 0)
                jetv += vec
            real_masses.append(jetv.mass)
            real_pts.append(jetv.pt)
        for i in range(args.num_samples):
            jetv = LorentzVector()
            for part in gen_out[i]:
                vec = LorentzVector()
                vec.setptetaphim(part[2], part[0], part[1], 0)
                jetv += vec
            gen_masses.append(jetv.mass)
            gen_pts.append(jetv.pt)

        mass_bins = np.arange(0, 400, 4)
        pt_bins = np.arange(0, 3000, 30)

        fig = plt.figure(figsize=(16, 8))
        fig.add_subplot(1, 2, 1)
        plt.ticklabel_format(axis='y', scilimits=(0, 0), useMathText=True)
        _ = plt.hist(real_masses, bins=mass_bins, histtype='step', label='Real', color='red')
        _ = plt.hist(gen_masses, bins=mass_bins, histtype='step', label='Generated', color='blue')
        plt.xlabel('Jet Mass (GeV)')
        plt.ylabel('Jets')
        plt.legend(loc=1, prop={'size': 18})
        fig.add_subplot(1, 2, 2)
        plt.ticklabel_format(axis='y', scilimits=(0, 0), useMathText=True)
        _ = plt.hist(real_pts, bins=pt_bins, histtype='step', label='Real', color='red')
        _ = plt.hist(gen_pts, bins=pt_bins, histtype='step', label='Generated', color='blue')
        plt.xlabel('Jet $p_T$ (GeV)')
        plt.ylabel('Jets')
        plt.legend(loc=1, prop={'size': 18})
        plt.savefig(args.figs_path + name + "_mass_pt.pdf", bbox_inches='tight')
        plt.close()

    if args.fid:
        fid_5 = losses['fid'][::5]
        x = np.arange(len(losses['fid']), step=5)
        plt.figure()
        plt.plot(x, np.log10(fid_5))
        # plt.ylim((0, 5))
        plt.xlabel('Epoch')
        plt.ylabel('Log10FID')
        # plt.legend()
        plt.savefig(args.losses_path + name + "_fid.pdf", bbox_inches='tight')
        plt.close()

    x = np.arange(epoch + 1, step=args.save_epochs)

    # plt.rcParams.update({'font.size': 12})
    # fig = plt.figure(figsize=(22, 5))
    # for i in range(3):
    #     fig.add_subplot(1, 3, i + 1)
    #     plt.plot(x, np.log10(np.array(losses['jsdm'])[:, i]))
    #     # plt.ylim((0, 5))
    #     plt.xlabel('Epoch')
    #     plt.ylabel('Particle ' + labels[i] + ' LogJSD')
    #     # plt.legend()
    # plt.savefig(args.losses_path + name + "_jsd.pdf", bbox_inches='tight')
    # plt.close()

    # if(args.gp): np.savetxt(args.losses_path + args.name + "/" + "gp.txt", losses['gp'])
    # np.savetxt(args.losses_path + args.name + "/" + "D.txt", losses['D'])
    # np.savetxt(args.losses_path + args.name + "/" + "G.txt", losses['G'])
    # np.savetxt(args.losses_path + args.name + "/" + "Dr.txt", losses['Dr'])
    # np.savetxt(args.losses_path + args.name + "/" + "Df.txt", losses['Df'])
    # np.savetxt(args.losses_path + args.name + "/" + "jsdm.txt", np.array(losses['jsdm']))
    # np.savetxt(args.losses_path + args.name + "/" + "jsdstd.txt", np.array(losses['jsdstd']))
    # if args.fid: np.savetxt(args.losses_path + args.name + "/" + "fid.txt", losses['fid'])

    if args.w1 and epoch >= 5:
        x = np.arange(5, epoch + 1, 5)
        plt.rcParams.update({'font.size': 12})
        colors = ['blue', 'green', 'orange']
        fig = plt.figure(figsize=(30, 7))
        for i in range(3):
            fig.add_subplot(1, 3, i + 1)
            for k in range(len(args.w1_num_samples)):
                plt.plot(x, np.log10(np.array(losses['w1_' + str(args.w1_num_samples[k]) + 'm'])[:, i]), label=str(args.w1_num_samples[k]) + ' Jet Samples', color=colors[k])
                # plt.fill_between(x, np.log10(np.array(losses['w1_' + str(args.num_samples[k]) + 'm'])[:, i] - np.array(losses['w1_' + str(args.num_samples[k]) + 'std'])[:, i]), np.log10(np.array(losses['w1_' + str(args.num_samples[k]) + 'm'])[:, i] + np.array(losses['w1_' + str(args.num_samples[k]) + 'std'])[:, i]), color=colors[k], alpha=0.2)
                # plt.plot(x, np.ones(len(x)) * np.log10(realw1m[k][i]), '--', label=str(args.num_samples[k]) + ' Real W1', color=colors[k])
                # plt.fill_between(x, np.log10(np.ones(len(x)) * (realw1m[k][i] - realw1std[k][i])), np.log10(np.ones(len(x)) * (realw1m[k][i] + realw1std[k][i])), color=colors[k], alpha=0.2)
            plt.legend(loc=2, prop={'size': 11})
            plt.xlabel('Epoch')
            plt.ylabel('Particle ' + labels[i] + ' LogW1')
        plt.savefig(args.losses_path + name + "_w1.pdf", bbox_inches='tight')
        plt.close()

        if args.jf:
            fig = plt.figure(figsize=(20, 7))
            for i in range(len(args.jet_features)):
                fig.add_subplot(1, len(args.jet_features), i + 1)
                for k in range(len(args.w1_num_samples)):
                    plt.plot(x, np.log10(np.array(losses['w1j_' + str(args.w1_num_samples[k]) + 'm'])[:, i]), label=str(args.w1_num_samples[k]) + ' Jet Samples', color=colors[k])
                plt.legend(loc=2, prop={'size': 11})
                plt.xlabel('Epoch')
                plt.ylabel('Jet ' + labelsj[i] + ' LogW1')
            plt.savefig(args.losses_path + name + "_w1j.pdf", bbox_inches='tight')
            plt.close()

    for key in losses:
        np.savetxt(args.losses_path + args.name + "/" + key + '.txt', losses[key])

    try:
        remove(args.losses_path + args.name + "/" + str(epoch - args.save_epochs) + ".pdf")
        remove(args.losses_path + args.name + "/" + str(epoch - args.save_epochs) + "_w1.pdf")
        # remove(args.losses_path + args.name + "/" + str(epoch - args.save_epochs) + "_fid.pdf")
    except:
        print("couldn't remove loss file")

    print("saved figs")
j = int(sys.argv[1])
print(j)
j = 5   # hard-coded override of the command-line index

# dir = '/graphganvol/data/'
dir = 'data/'

fnames = listdir(dir + 'weighted/')
evts = uproot4.concatenate(dir + 'weighted/' + fnames[j] + ":tree")

jet12mass = []
jet123mass = []

for i in tqdm(range(len(evts["fatJet1Pt"]))):
    jet1 = LorentzVector()
    jet1.setptetaphim(evts["fatJet1Pt"][i], evts["fatJet1Eta"][i], evts["fatJet1Phi"][i], evts["fatJet1Mass"][i])
    jet2 = LorentzVector()
    jet2.setptetaphim(evts["fatJet2Pt"][i], evts["fatJet2Eta"][i], evts["fatJet2Phi"][i], evts["fatJet2Mass"][i])
    jet3 = LorentzVector()
    jet3.setptetaphim(evts["fatJet3Pt"][i], evts["fatJet3Eta"][i], evts["fatJet3Phi"][i], evts["fatJet3Mass"][i])
    jet12mass.append((jet1 + jet2).mass)
    jet123mass.append((jet1 + jet2 + jet3).mass)

np.save(dir + fnames[j] + '_inv_mass.npy', np.array([jet12mass, jet123mass]))
def get_llp_spectrum(self, mass, coupling, channels=None, do_plot=False, save_file=True, print_stats=False, stat_cuts="p.pz>100. and p.pt/p.pz<0.1/480."):

    # prepare output
    model = self.model
    if channels is None: channels = [key for key in model.production.keys()]
    momenta_lab_all, weights_lab_all = [], []
    dirname = self.model.modelpath + "model/LLP_spectra/"
    if not os.path.exists(dirname): os.mkdir(dirname)

    # loop over channels
    for key in model.production.keys():

        # selected channels only
        if key not in channels: continue

        # summary statistics
        weight_sum, weight_sum_f = 0, 0
        momenta_lab, weights_lab = [LorentzVector(0, 0, -mass, mass)], [0]

        # 2 body decays
        if model.production[key][0] == "2body":
            # load details of decay channel
            pid0, pid1, br = model.production[key][1], model.production[key][2], model.production[key][3]
            generator, energy, nsample, massrange = model.production[key][4], model.production[key][5], model.production[key][6], model.production[key][7]
            if massrange is not None:
                if mass < massrange[0] or mass > massrange[1]: continue
            if self.masses(pid0) <= self.masses(pid1, mass) + mass: continue
            # load mother particle spectrum
            filename = self.dirpath + "files/hadrons/"+energy+"TeV/"+generator+"/"+generator+"_"+energy+"TeV_"+pid0+".txt"
            momenta_mother, weights_mother = self.convert_list_to_momenta(filename, mass=self.masses(pid0))
            # get sample of LLP momenta in the mother's rest frame
            m0, m1, m2 = self.masses(pid0), self.masses(pid1, mass), mass
            momenta_llp, weights_llp = self.decay_in_restframe_2body(eval(br), m0, m1, m2, nsample)
            # loop through all mother particles, and decay them
            for p_mother, w_mother in zip(momenta_mother, weights_mother):
                # if mother is short-lived, add factor that requires them to decay before absorption
                w_decay = self.get_decay_prob(pid0, p_mother)
                for p_llp, w_lpp in zip(momenta_llp, weights_llp):
                    p_llp_lab = p_llp.boost(-1.*p_mother.boostvector)
                    momenta_lab.append(p_llp_lab)
                    weights_lab.append(w_mother*w_lpp*w_decay)
                    # statistics
                    weight_sum += w_mother*w_lpp*w_decay
                    if print_stats:
                        p = p_llp_lab
                        if eval(stat_cuts): weight_sum_f += w_mother*w_lpp*w_decay

        # 3 body decays
        if model.production[key][0] == "3body":
            # load details of decay channel
            pid0, pid1, pid2, br = model.production[key][1], model.production[key][2], model.production[key][3], model.production[key][4]
            generator, energy, nsample, massrange = model.production[key][5], model.production[key][6], model.production[key][7], model.production[key][8]
            if massrange is not None:
                if mass < massrange[0] or mass > massrange[1]: continue
            if self.masses(pid0) <= self.masses(pid1, mass) + self.masses(pid2, mass) + mass: continue
            # load mother particle
            filename = self.dirpath + "files/hadrons/"+energy+"TeV/"+generator+"/"+generator+"_"+energy+"TeV_"+pid0+".txt"
            momenta_mother, weights_mother = self.convert_list_to_momenta(filename, mass=self.masses(pid0))
            # get sample of LLP momenta in the mother's rest frame
            m0, m1, m2, m3 = self.masses(pid0), self.masses(pid1, mass), self.masses(pid2, mass), mass
            momenta_llp, weights_llp = self.decay_in_restframe_3body(br, coupling, m0, m1, m2, m3, nsample)
            # loop through all mother particles, and decay them
            for p_mother, w_mother in zip(momenta_mother, weights_mother):
                # if mother is short-lived, add factor that requires them to decay before absorption
                w_decay = self.get_decay_prob(pid0, p_mother)
                for p_llp, w_lpp in zip(momenta_llp, weights_llp):
                    p_llp_lab = p_llp.boost(-1.*p_mother.boostvector)
                    momenta_lab.append(p_llp_lab)
                    weights_lab.append(w_mother*w_lpp*w_decay)
                    # statistics
                    weight_sum += w_mother*w_lpp*w_decay
                    if print_stats:
                        p = p_llp_lab
                        if eval(stat_cuts): weight_sum_f += w_mother*w_lpp*w_decay

        # mixing with SM particles
        if model.production[key][0] == "mixing":
            if mass > 1.699: continue
            pid, mixing = model.production[key][1], model.production[key][2]
            generator, energy, massrange = model.production[key][3], model.production[key][4], model.production[key][5]
            if massrange is not None:
                if mass < massrange[0] or mass > massrange[1]: continue
            filename = self.dirpath + "files/hadrons/"+energy+"TeV/"+generator+"/"+generator+"_"+energy+"TeV_"+pid+".txt"
            momenta_mother, weights_mother = self.convert_list_to_momenta(filename, mass=self.masses(pid))
            mixing_angle = eval(mixing)
            for p_mother, w_mother in zip(momenta_mother, weights_mother):
                momenta_lab.append(p_mother)
                weights_lab.append(w_mother*mixing_angle**2)
                # statistics
                weight_sum += w_mother*mixing_angle**2
                if print_stats:
                    p = p_mother
                    if eval(stat_cuts): weight_sum_f += w_mother*mixing_angle**2

        # direct production
        if model.production[key][0] == "direct":
            # load info
            label, energy, coupling_ref = key, model.production[key][1], model.production[key][2]
            condition, masses = model.production[key][3], model.production[key][4]
            # determine the mass benchmarks below / above the target mass
            if mass < masses[0] or mass > masses[-1]: continue
            mass0, mass1 = 0, 1e10
            for xmass in masses:
                if xmass <= mass and xmass > mass0: mass0 = xmass
                if xmass > mass and xmass < mass1: mass1 = xmass
            # load benchmark data
            filename0 = self.model.modelpath+"model/direct/"+energy+"TeV/"+label+"_"+energy+"TeV_"+str(mass0)+".txt"
            filename1 = self.model.modelpath+"model/direct/"+energy+"TeV/"+label+"_"+energy+"TeV_"+str(mass1)+".txt"
            try:
                momenta_llp0, weights_llp0 = self.convert_list_to_momenta(filename0, mass=mass0, nocuts=True)
                momenta_llp1, weights_llp1 = self.convert_list_to_momenta(filename1, mass=mass1, nocuts=True)
            except:
                print("did not find file:", filename0, "or", filename1)
                continue
            # loop over particles ('condition' is eval'ed with the local variables, e.g. p and eps, in scope)
            eps = 1e-6
            for p, w_lpp0, w_lpp1 in zip(momenta_llp0, weights_llp0, weights_llp1):
                if condition is not None and eval(condition) == 0: continue
                w_lpp = w_lpp0 + (w_lpp1-w_lpp0)/(mass1-mass0)*(mass-mass0)
                momenta_lab.append(p)
                weights_lab.append(w_lpp*coupling**2/coupling_ref**2)
                # statistics
                weight_sum += w_lpp*coupling**2/coupling_ref**2
                if print_stats:
                    if eval(stat_cuts): weight_sum_f += w_lpp*coupling**2/coupling_ref**2

        # save spectrum and print statistics
        if save_file == True:
            filenamesave = dirname+energy+"TeV_"+key+"_m_"+str(mass)+".npy"
            self.convert_to_hist_list(momenta_lab, weights_lab, do_plot=False, filename=filenamesave)
        if print_stats:
            print(key, "{:.2e}".format(weight_sum), "{:.2e}".format(weight_sum_f))
        for p, w in zip(momenta_lab, weights_lab):
            momenta_lab_all.append(p)
            weights_lab_all.append(w)

    # return
    if do_plot:
        return self.convert_to_hist_list(momenta_lab_all, weights_lab_all, do_plot=do_plot)[0]
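# In the "direct" production branch above, the event weight at the target mass is obtained by
# linear interpolation between the two bracketing benchmark masses. The formula in isolation
# (hypothetical helper name and toy numbers):
def interpolate_weight(w0, w1, mass0, mass1, mass):
    """Linearly interpolate an event weight between two benchmark masses."""
    return w0 + (w1 - w0) / (mass1 - mass0) * (mass - mass0)

# benchmark weights at 0.1 and 0.2 GeV, evaluated at 0.15 GeV -> 3.0e-3
print(interpolate_weight(2.0e-3, 4.0e-3, 0.1, 0.2, 0.15))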