def make_graph(a, output):
    c1 = TCanvas("c1", "c1", 1000, 1000)  # Creates the canvas to draw the plot on.
    c1.SetGrid()  # Adds grid lines to the canvas.
    leg = TLegend(0.7, 0.6, 0.95, 0.95)
    leg.AddEntry(a, "Start", "P")
    n0 = TNtuple("n0", "n0", "x:y:z")  # creates ntuple to store the values of x, y, z
    n0.SetMarkerColor(0)
    n0.Fill(-4500, -4500, -5700)
    n0.Fill(4500, 4500, 5700)
    n0.Draw("x:y:z")
    #a.SetMarkerColor(1)
    #a.SetMarkerStyle(6)
    #a.Draw("x:y:z", "", "same")  # Draws the start positions on the same canvas.
    a.SetMarkerColor(2)
    a.SetMarkerStyle(6)
    a.Draw("x2:y2:z2", "", "same")  # Draws the endpoints on the same canvas.
    #leg.Draw()
    c1.Update()  # Makes the canvas show the plot.
    img = ROOT.TImage.Create()  # creates an image
    img.FromPad(c1)  # takes it from the canvas
    img.WriteImage(output)  # Saves it to a PNG file with this name in the input file directory.
    return c1
def get_pos(event):
    # Start and end position of each particle that is not an electron or positron.
    a = TNtuple("a", "a", "x:y:z:x2:y2:z2")  # ntuple storing the start (x, y, z) and endpoint (x2, y2, z2)
    mcpart = event.getCollection("MCParticle")
    for ding in mcpart:
        ptype = ding.getPDG()
        if ptype != 11 and ptype != -11:
            pos = ding.getVertex()
            end = ding.getEndpoint()
            x = pos[0]
            y = pos[1]
            z = pos[2]
            x2 = end[0]
            y2 = end[1]
            z2 = end[2]
            print x, "\t", y, "\t", z
            a.Fill(x, y, z, x2, y2, z2)
    return a
def make_graph(n, output):
    c1 = TCanvas()  # Creates the canvas to draw the plot on.
    c1.SetGrid()  # Adds grid lines to the canvas.
    n0 = TNtuple("n0", "n0", "x:y:z")
    n0.Fill(-0.1, -0.1, -1)
    n0.Fill(0.1, 0.1, 1)
    n0.Draw("x:y:z")
    n.SetMarkerColor(2)
    n.SetMarkerStyle(6)
    n.Draw("x:y:z", "", "same")  # Draws the points on the same canvas.
    c1.Update()  # Makes the canvas show the plot.
    img = ROOT.TImage.Create()  # creates an image
    img.FromPad(c1)  # takes it from the canvas
    img.WriteImage(output)  # Saves it to a PNG file with this name in the input file directory.
    return c1
def make_ntup(file_name, tree_name, branches, outfile, n_events, new_tree_name):
    if new_tree_name == "":
        new_tree_name = tree_name
    print file_name

    # Get the event tree
    tree = TChain(tree_name)
    tree.Add(file_name)
    if not tree:
        print "Error: No tree named %s in %s" % (tree_name, file_name)
        sys.exit()

    # Check branches exist
    branches_avail = [x.GetName() for x in tree.GetListOfBranches()]
    for b in branches:
        if not b in branches_avail:
            print "Error: branch '%s' not a branch in input tree" % (b)
            print "Branches available are: \n"
            print "\t".join(branches_avail)
            sys.exit()

    # output
    out_file = TFile(outfile, "RECREATE")
    nt = TNtuple(new_tree_name, "", ":".join(branches))

    if (n_events < 0):
        n_events = tree.GetEntries()

    # loop over events and fill the branches of the new ntuple
    for index, entry in enumerate(tree):
        if index > n_events:
            break
        vals = array('f', [entry.__getattr__(b) for b in branches])
        nt.Fill(vals)
        if (index % 100000 == 0):
            print index, "/", n_events

    # Save
    out_file.cd()
    nt.Write()
    out_file.Close()
    print "Written %i entries of branch(es) '%s' \nto tree %s \nin file %s" % (
        n_events, ":".join(branches), new_tree_name, outfile)
def FillNTuple(tupname, data, names):
    """
    Create and fill ROOT NTuple with the data sample.
      tupname : name of the NTuple
      data    : data sample
      names   : names of the NTuple variables
    """
    variables = ""
    for n in names:
        variables += "%s:" % n
    variables = variables[:-1]
    values = len(names) * [0.]
    avalues = array.array('f', values)
    nt = TNtuple(tupname, "", variables)
    for d in data:
        for i in range(len(names)):
            avalues[i] = d[i]
        nt.Fill(avalues)
    nt.Write()
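# A minimal, hypothetical usage sketch for FillNTuple above (not from the original
# source): the file name 'toy.root', the tuple name 'toy' and the toy data are
# invented for illustration. TNtuple.Write() stores the tuple into the file that is
# currently open, so a TFile is opened first.
from ROOT import TFile

f = TFile('toy.root', 'recreate')
FillNTuple('toy', [(1.0, 2.0), (3.0, 4.0)], ['a', 'b'])
f.Close()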
def get_mom(event):
    # Each detector is a 'collection'; the No. of Elements are the hits.
    k = 0
    n = TNtuple("n", "n", "x:y:z")  # creates ntuple to store the values of x, y, z
    mcpart = event.getCollection("MCParticle")  # opens the collection
    nbin = mcpart.getNumberOfElements()  # gets the number of particles in the collection
    for ding in mcpart:  # for each MC particle
        pos = ding.getMomentum()  # gets the momentum as a 3-vector array
        ptype = ding.getPDG()
        if ptype != 11 and ptype != -11:
            x = pos[0]  # sets values from the 3-vector array to single variables
            y = pos[1]
            z = pos[2]
            k += 1
            n.Fill(x, y, z)  # fills the ntuple
    print k
    return n
def get_pos(event):
    # Each detector is a 'collection', the No. of Elements are the hits.
    xcoord1 = []  # a list for each x coordinate
    ycoord1 = []
    zcoord1 = []
    n = TNtuple("n", "n", "x:y:z")  # creates ntuple to store the values of x y z
    BCAL = event.getCollection("BeamCalHits")  # opens the collection
    nbin = BCAL.getNumberOfElements()  # gets the number of hits on the beamcal
    for ding in BCAL:  # for each hit in the beamcal
        pos = ding.getPosition()  # gets position in 3 vector array
        x = pos[0]  # sets value from 3 vector array to single variables
        y = pos[1]
        z = pos[2]
        n.Fill(x, y, z)  # fills the ntuple
    return n
def get_pos(event):
    # Each detector is a 'collection', the No. of Elements are the hits.
    xcoord1 = []
    ycoord1 = []
    zcoord1 = []
    nparts = 0  # counter for number of interesting particles
    n = TNtuple("n", "n", "x:y:z")  # makes the ntuple to be filled
    mcpart = event.getCollection("MCParticle")
    for ding in mcpart:
        if ding.getPDG() != 11 and ding.getPDG() != -11:  # id not e+ or e-
            pos = ding.getVertex()  # gets the start position as 3 vector array
            x = pos[0]  # assigns value from array to single variables
            y = pos[1]
            z = pos[2]
            n.Fill(x, y, z)  # fills the ntuple
            nparts += 1  # counts up for each particle
    print nparts
    return n
cmd = ['lumiCalc.py -n 0.0429 -c frontier://LumiProd/CMS_LUMI_PROD -r ', ' -o ', '.csvt lumibyls']
a = {}
with open(jsonfile) as f:
    a = json.load(f)
f.close()

if not os.path.isdir(wdir):
    os.system('mkdir ' + wdir)

f = TFile(wdir + '/lumis.root', 'recreate')
ntuple = TNtuple('ntuple', 'data from ascii file', 'run:ls:lumiDelivered:lumiReported')

for run, lumis in a.iteritems():
    fullcmd = cmd[0] + run + cmd[1] + wdir + '/' + run + cmd[2]
    print 'Get luminosity information for run ' + run
    os.system(fullcmd)
    rf = open(wdir + '/' + run + '.csvt', 'r')
    crf = csv.reader(rf)
    crf.next()
    for row in crf:
        ntuple.Fill(int(row[0]), int(row[1]), float(row[2]), float(row[3]))
    rf.close()
    os.system('rm ' + wdir + '/' + run + '.csvt')

f.Write()
f.Close()
print 'Luminosity tree written to working directory ./' + wdir
os.system('cp ' + jsonfile + ' ' + wdir + '/' + jsonfile)
sys.exit()
#!/usr/bin/env python
# Example taken from https://root.cern.ch/how/how-write-ttree-python
# and modified to run...
"""
Creates a simple ROOT file with a TNtuple containing three columns.
"""
from ROOT import TFile, TNtuple
from array import array

f = TFile('TNtuple.root', 'recreate')
t = TNtuple('n1', 'ntuple with 3 columns', "x:y:z")

x = array('i', [0])
y = array('f', [0])
z = array('d', [0])

for i in range(100):
    x[0] = i
    y[0] = i + i / 13
    z[0] = i + i / 17
    t.Fill(x[0], y[0], z[0])

f.Write()
f.Close()
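# A minimal sketch (not part of the original example) of how the file written above
# could be read back with PyROOT; it assumes the file name 'TNtuple.root' and the
# ntuple name 'n1' used in the snippet above.
from ROOT import TFile

fin = TFile.Open('TNtuple.root')
nt = fin.Get('n1')
for row in nt:                  # PyROOT lets you iterate over the entries
    print(row.x, row.y, row.z)  # the leaves are exposed as attributes
fin.Close()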
class SimpleJetNTupler(Analyzer): '''dump very few quantities into a TNtuple for jet resolution studies.''' ### def __init__(self,cfg_ana, cfg_comp, looperName): ### loadLibs() ### super (SimpleJetNTupler, self).__init__(cfg_ana, cfg_comp, looperName) def declareHandles(self): super(SimpleJetNTupler, self).declareHandles() self.handles['jets'] = AutoHandle(*self.cfg_ana.jetCollection) if self.cfg_ana.useGenLeptons: self.mchandles['genParticlesPruned'] = AutoHandle( 'genParticlesPruned', 'std::vector<reco::GenParticle>') else: self.mchandles['genParticles'] = AutoHandle( 'prunedGen', 'std::vector<reco::GenParticle>') self.mchandles['genJets'] = AutoHandle(*self.cfg_ana.genJetsCollection) self.handles['vertices'] = AutoHandle('offlinePrimaryVertices', 'std::vector<reco::Vertex>') # .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... def beginLoop(self): super(SimpleJetNTupler, self).beginLoop() self.file = TFile('/'.join([self.looperName, 'testJetsNT.root']), 'recreate') if self.cfg_ana.applyPFLooseId: from ROOT import PFJetIDSelectionFunctor self.isPFLooseFunc = PFJetIDSelectionFunctor( 0, PFJetIDSelectionFunctor.LOOSE) ## Workaround: for some reason PyROOT does not bind nor PFJetIDSelectionFunctor(Jet)PFJetIDSelectionFunctor.getBitsTemplates from ROOT import pat self.isPFLooseFunc.bits = pat.strbitset() for i in "CHF", "NHF", "CEF", "NEF", "NCH", "nConstituents": self.isPFLooseFunc.bits.push_back(i) ## /Workaround self.isPFLoose = lambda x: self.isPFLooseFunc( x, self.isPFLooseFunc.bits) else: self.isPFLoose = lambda x: True self.myntuple = TNtuple( self.cfg_ana.ntupleName, self.cfg_ana.ntupleName, 'genPt:recoPt:genEta:recoEta:genPhi:recoPhi:nvtx') # .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... def process(self, iEvent, event): #read all the handles defined beforehand self.readCollections(iEvent) jetEtaCut = 4.5 # get the vertexes event.vertices = self.handles['vertices'].product() # self.h_nvtx.Fill (len (event.vertices)) # get the jets in the jets variable jets = self.handles['jets'].product() # filter jets with some selections event.jets = [ jet for jet in jets if (abs(jet.eta()) < jetEtaCut and jet.pt() > self.cfg_ana.ptCut and self.isPFLoose(jet)) ] # get status 2 leptons if 'genParticlesPruned' in self.mchandles: event.genLeptons = [ lep for lep in self.mchandles['genParticlesPruned'].product() if lep.status() == 2 and (abs(lep.pdgId()) == 11 or abs( lep.pdgId()) == 13 or abs(lep.pdgId()) == 15) ] else: event.genLeptons = [ lep for lep in self.mchandles['genParticles'].product() if lep.status() == 3 and (abs(lep.pdgId()) == 11 or abs( lep.pdgId()) == 13 or abs(lep.pdgId()) == 15) ] # @ Pasquale: why level 3 and not level 2? # event.selGenLeptons = [GenParticle (lep) for lep in event.genLeptons if (lep.pt ()>self.cfg_ana.ptCut and abs (lep.eta ()) < jetEtaCut)] # get genJets event.genJets = map(GenJet, self.mchandles['genJets'].product()) # filter genjets as for reco jets event.selGenJets = [ GenJet(jet) for jet in event.genJets if (jet.pt() > self.cfg_ana.genPtCut) ] #FIXME why are there cases in which there's 4 or 6 leptons? 
if len(event.genLeptons) != 2: return # in case I want to filter out taus # 11, 13, 15 : e, u, T # event.genOneLepton = [GenParticle (part) for part in event.genLeptons if abs (part.pdgId ()) == 15] # remove leptons from jets if closer than 0.2 event.cleanJets = cleanObjectCollection(event.jets, event.genLeptons, 0.2) event.matchingCleanJets = matchObjectCollection2( event.cleanJets, event.selGenJets, 0.25) # assign to each jet its gen match (easy life :)) for jet in event.cleanJets: jet.gen = event.matchingCleanJets[jet] event.matchedCleanJets = [ jet for jet in event.matchingCleanJets if jet.gen != None ] for jet in event.matchedCleanJets: self.myntuple.Fill(jet.gen.pt(), jet.pt(), jet.gen.eta(), jet.eta(), jet.gen.phi(), jet.phi(), len(event.vertices)) # .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... .... def write(self): from ROOT import gROOT gROOT.SetBatch(True) self.file.cd() self.myntuple.Write() self.file.Close()
def make_graph(a, b, c, d, e, f, g, h, i, j, k, l, output):
    c1 = TCanvas("c1", "c1", 800, 800)  # Creates the canvas to draw the plot on.
    c1.SetGrid()  # Adds grid lines to canvas.
    leg = TLegend(0.7, 0.6, 0.95, 0.95)
    leg.AddEntry(a, "EcalBarrelHits", "P")
    leg.AddEntry(b, "EcalEndcapHits", "P")
    leg.AddEntry(c, "HcalBarrelHits", "P")
    leg.AddEntry(d, "HcalEndcapHits", "P")
    leg.AddEntry(e, "LumiCalHits", "P")
    leg.AddEntry(f, "MuonBarrelHits", "P")
    leg.AddEntry(g, "MuonEndcapHits", "P")
    leg.AddEntry(h, "SiTrackerBarrelHits", "P")
    leg.AddEntry(i, "SiTrackerEndcapHits", "P")
    leg.AddEntry(j, "SiTrackerForwardHits", "P")
    leg.AddEntry(k, "SiVertexBarrelHits", "P")
    leg.AddEntry(l, "SiVertexEndcapHits", "P")
    n0 = TNtuple("n0", "n0", "x:y:z")  # creates ntuple to store the values of x y z
    n0.SetMarkerColor(0)
    n0.Fill(-4500, -4500, -5700)
    n0.Fill(4500, 4500, 5700)
    n0.Draw("x:y:z")
    a.SetMarkerColor(1)
    a.SetMarkerStyle(6)
    a.Draw("x:y:z", "", "same")  # Draws each collection's hits on the same canvas.
    b.SetMarkerColor(2)
    b.SetMarkerStyle(6)
    b.Draw("x:y:z", "", "same")
    c.SetMarkerColor(3)
    c.SetMarkerStyle(6)
    c.Draw("x:y:z", "", "same")
    d.SetMarkerColor(4)
    d.SetMarkerStyle(6)
    d.Draw("x:y:z", "", "same")
    e.SetMarkerColor(5)
    e.SetMarkerStyle(6)
    e.Draw("x:y:z", "", "same")
    f.SetMarkerColor(6)
    f.SetMarkerStyle(6)
    f.Draw("x:y:z", "", "same")
    g.SetMarkerColor(7)
    g.SetMarkerStyle(6)
    g.Draw("x:y:z", "", "same")
    h.SetMarkerColor(8)
    h.SetMarkerStyle(6)
    h.Draw("x:y:z", "", "same")
    i.SetMarkerColor(9)
    i.SetMarkerStyle(6)
    i.Draw("x:y:z", "", "same")
    j.SetMarkerColor(30)
    j.SetMarkerStyle(6)
    j.Draw("x:y:z", "", "same")
    k.SetMarkerColor(40)
    k.SetMarkerStyle(6)
    k.Draw("x:y:z", "", "same")
    l.SetMarkerColor(28)
    l.SetMarkerStyle(6)
    l.Draw("x:y:z", "", "same")
    leg.Draw()
    c1.Update()  # Makes the canvas show the plot.
    img = ROOT.TImage.Create()  # creates image
    img.FromPad(c1)  # takes it from canvas
    img.WriteImage(output)  # Saves it to a PNG file with this name in the input file directory.
    return c1
def main(argv=None): start = time.time() # ROOT batch mode ROOT.gROOT.SetBatch(1) ''' # ============================================================ # ArgumentParser # ============================================================ parser = argparse.ArgumentParser(description='Cosmic Tracks') parser.add_argument("config_file") parser.add_argument("data_file", nargs="+") parser.add_argument("-o", "--out_path") parser.add_argument("-i", "--uid", type=int, default=0, help="Unique identifier used in output files.") parser.add_argument("-n", "--max_evts", type=int, default=0, metavar="N", help="Stop after %(metavar)s events.") parser.add_argument("-n", type=int, default=0, metavar="N", help="Stop after %(metavar)s spills.") parser.add_argument("-s", "--seed", type=int, help="Set the RNG seed.") parser.add_argument("-m", "--measure", action="store_true", help="Measure rho, phi and doca for each gamma and fill into histogram.") parser.add_argument("-a", "--analyse", action="store_true", help="Run analysis.") parser.add_argument("-d", "--display", action="store_true", help="Save event display CSVs.") parser.add_argument("-D", "--debug", action="store_true", help="Only one event per spill, fixed vertex position.") args = parser.parse_args(argv) ''' # ============================================================ # Set paths # ============================================================ datapath = '/data/SingleModule_Nov2020/LArPix/dataRuns/rootTrees/combined_with_light' print(' datapath: ', datapath) outputpath = '/home/lhep/PACMAN/larpix-analysis/lightCharge_anticorrelation' print(' outputpath: ', outputpath) files = sorted( [os.path.basename(path) for path in glob.glob(datapath + '/*.root')]) print(' datafiles: ') for f in files: print(' ', f) # ============================================================ # Define voxelisation # ============================================================ n_voxels_x = 70 n_voxels_y = 70 n_voxels_z = 70 pitch_x = 4.434 pitch_y = 4.434 pitch_z = 4.434 x_min = -pitch_x * n_voxels_x / 2. #155.19 x_max = pitch_x * n_voxels_x / 2. #155.19 y_min = -pitch_y * n_voxels_y / 2. #155.19 y_max = pitch_y * n_voxels_y / 2. #155.19 #z_min = - pitch_z * n_voxels_z/2. #155.19 #z_max = pitch_z * n_voxels_z/2. 
#155.19 #y_min = -155.19 #y_max = 155.19 z_min = 0 z_max = 400 # ============================================================ # Input tree # ============================================================ #inputFileName = (str(args.data_file)[34:])[:-7] # excludes ending .root for file_number in range(len(files)): # Only process specific file(s) #if files[file_number] != 'datalog_2020_11_29_12_22_02_CET_evd.h5': # continue #if not (file_number >= 0 and file_number < 10): # continue inputFileName = files[file_number] print(' -------------------------------------- ') print(' Processing file', inputFileName) outFileName = inputFileName[:-7] + '.root' input_tree = ROOT.TChain("t_out", "t_out") #for root_file in config["data_files"]: # input_tree.Add(root_file) #input_tree.Add( "/path/to.root" ) input_tree.Add(datapath + '/' + inputFileName) #not_used_files = [13,32,50,61,66,77,81,86,89,92,94,97,99] #print " Do not use files with numbers in {:} " .format(not_used_files) # Define if plots are made or not make_plots = True if make_plots: plot_folder = inputFileName[16:-5] os.system('rm -rf plots/' + str(plot_folder)) os.system('mkdir plots/' + str(plot_folder)) # Turn on all branches input_tree.SetBranchStatus("*", 1) # Define Histograms / NTuples # --------------------------------------------------------- makePlots = True h1_trLength = TH1F('h1_trLength', ' ; Track length [mm] ; Entries [-]', 150, 0, 500) h2_trLength_vs_nHits = TH2F( 'h2_trLength_vs_nHits', ' ; Track Length [mm] ; Number of Hits [-] ; Entries [-]', 100, 0, 500, 100, 0, 500) h3_event_hits = TH3F('h3_event_hits', ' ; x ; y; z', 70, -155, 155, 70, -155, 155, 100, -300, 3000) ntuple = TNtuple('ntuple', 'data from ascii file', 'x:y:z:cont') plot4d = TH3F('h3_ev_hits', ' ; x ; y; z', 70, -155.19, 155.19, 70, -155.19, 155.19, 200, -500, 1500) # Make track selection # --------------------------------------------------------- # TODO: Make 3D histogram to test selection goodness # Event with only 1 track (right?) 
# Analyse input tree # --------------------------------------------------------- n_tracks = input_tree.GetEntries() print(' n_tracks: ', n_tracks) x_min = 100 x_max = -100 y_min = 100 y_max = -100 z_min = 100 z_max = -100 # Loop over all tracks in input_tree for track_id in range(n_tracks): input_tree.GetEntry(track_id) print(' Processing track', track_id, 'of', n_tracks, '...') if track_id > 5: break #print(' t_eventID: ', input_tree.t_eventID) #print(' t_trackID: ', input_tree.t_trackID) #print(' t_event_q: ', input_tree.t_event_q) #print(' t_track_q: ', input_tree.t_track_q) #print(' t_event_nhits: ', input_tree.t_event_nhits) #print(' t_track_nhits: ', input_tree.t_track_nhits) h1_trLength.Fill(input_tree.t_track_length) h2_trLength_vs_nHits.Fill(input_tree.t_track_length, input_tree.t_track_nhits) # Get all hits in the event voxels = np.zeros((n_voxels_x, n_voxels_y, n_voxels_z)) for hit in range(10): #input_tree.t_event_nhits): if input_tree.t_event_hits_x[hit] < x_min: x_min = input_tree.t_event_hits_x[hit] if input_tree.t_event_hits_x[hit] > x_max: x_max = input_tree.t_event_hits_x[hit] if input_tree.t_event_hits_y[hit] < y_min: y_min = input_tree.t_event_hits_y[hit] if input_tree.t_event_hits_y[hit] > y_max: y_max = input_tree.t_event_hits_y[hit] if input_tree.t_event_hits_z[hit] < z_min: z_min = input_tree.t_event_hits_z[hit] if input_tree.t_event_hits_z[hit] > z_max: z_max = input_tree.t_event_hits_z[hit] #print(' hit: ', hit, ' \t x: ', input_tree.t_event_hits_x[hit], '\t y: ', input_tree.t_event_hits_y[hit], ' \t z: ', input_tree.t_event_hits_z[hit]) voxel_x = math.floor((input_tree.t_event_hits_x[hit] + (pitch_x * (n_voxels_x) / 2.)) / pitch_x) voxel_y = math.floor((input_tree.t_event_hits_y[hit] + (pitch_y * (n_voxels_y) / 2.)) / pitch_y) voxel_z = math.floor((input_tree.t_event_hits_z[hit] + (pitch_z * (n_voxels_z) / 2.)) / pitch_z) #print(' voxel_x: ', voxel_x, ' \t voxel_y: ', voxel_y, ' \t voxel_z: ', voxel_z) if voxel_x < n_voxels_x and voxel_y < n_voxels_y and voxel_z < n_voxels_z: voxels[voxel_x][voxel_y][voxel_z] += input_tree.t_event_hits_q[ hit] # TODO: make under- and overflow voxel for every coordinate h3_event_hits.Fill(input_tree.t_event_hits_x[hit], input_tree.t_event_hits_y[hit], input_tree.t_event_hits_z[hit], input_tree.t_event_hits_q[hit]) for vox_x in range(n_voxels_x): vox_x_middle = x_min + (vox_x + 0.5) * pitch_x for vox_y in range(n_voxels_y): vox_y_middle = y_min + (vox_y + 0.5) * pitch_y for vox_z in range(n_voxels_z): vox_z_middle = z_min + (vox_z + 0.5) * pitch_z if voxels[vox_x][vox_y][vox_z] > 0: ntuple.Fill(vox_x_middle, vox_y_middle, vox_z_middle, voxels[vox_x][vox_y][vox_z]) #h3_event_hits.Fill(vox_x_middle,vox_y_middle,vox_z_middle,voxels[vox_x][vox_y][vox_z]) if (track_id % 2 == 0): now = time.time() print(' Processed', math.floor(track_id * 100 / n_tracks), 'of', n_tracks, 'tracks. \t Elapsed time:', (now - start), ' seconds ... 
\r') print(' x_min: ', x_min) print(' x_max: ', x_max) print(' y_min: ', y_min) print(' y_max: ', y_max) print(' z_min: ', z_min) print(' z_max: ', z_max) c0 = ROOT.TCanvas() ROOT.gStyle.SetOptStat(0) ROOT.gStyle.SetOptTitle(0) ntuple.Draw('x:y:z:cont>>plot4d', '', 'COLZ') #plot4d.SetLabelSize(0.5) #plot4d.SetMarkerSize(300) #ntuple.SetMarkerSize(300) #ntuple.SetMarkerColor(2) #ntuple.SetFillColor(38) #h3_event_hits.Draw("COLZ") c0.Print('test.png') # Make plots # --------------------------------------------------------- if makePlots: plot_h1_trLength(h1_trLength, 'h1_trLength', plot_folder) plot_h2_trLength_vs_nHits(h2_trLength_vs_nHits, 'h2_trLength_vs_nHits', plot_folder) plot_h3_event_hits(h3_event_hits, 'h3_event_hits', plot_folder)
def selectEvents(fileName,saveProbes=False,saveSummary=False,outputDir='./',xsec=-1,correctionsMap={}): gSystem.ExpandPathName(fileName) file=TFile.Open(fileName) #exclusivity of triggers per PD eTriggersOnly = ('SingleEle' in fileName) muTriggersOnly = ('SingleMu' in fileName) #normalizations and corrections origEvents=1.0 puWeightsGr=None if xsec>0 : origEvents=file.Get('smDataAnalyzer/cutflow').GetBinContent(1) if origEvents==0 : print '[Warning] 0 initial events ?' #derive pileup weights origPileup=file.Get('smDataAnalyzer/pileup') try: dataPileupFile=TFile.Open(correctionsMap['pu']) dataPileup=dataPileupFile.Get('pileup') normF=origPileup.Integral()/dataPileup.Integral() if normF>0 : puWeightsGr=TGraph() for xbin in xrange(1,origPileup.GetXaxis().GetNbins()+1) : iweight=1.0 if origPileup.GetBinContent(xbin)>0 : iweight=normF*dataPileup.GetBinContent(xbin)/origPileup.GetBinContent(xbin) puWeightsGr.SetPoint( puWeightsGr.GetN(), origPileup.GetXaxis().GetBinCenter(xbin), iweight ) dataPileupFile.Close() except : print 'No data pileup file provided or other error occurred. If you wish add -w pu,pu_file.root' jecCorrector=None jecUncertainty=None try: prefix='Data' if xsec>0 : prefix='MC' jecDir=correctionsMap['jec'] gSystem.ExpandPathName(jecDir) jetCorLevels='L1FastJet' jetCorFiles=jecDir+'/'+prefix+'_L1FastJet_AK5PFchs.txt' jetCorLevels=jetCorLevels+':L2Relative' jetCorFiles=jetCorFiles+':'+jecDir+'/'+prefix+'_L2Relative_AK5PFchs.txt' jetCorLevels=jetCorLevels+':L3Absolute' jetCorFiles=jetCorFiles+':'+jecDir+'/'+prefix+'_L3Absolute_AK5PFchs.txt' #if prefix=='Data': # jetCorLevels=jetCorLevels+':L2L3Residual' # jetCorFiles=jetCorFiles+':'+jecDir+'/'+prefix+'_L2L3Residual_AK5PFchs.txt' jecCorrector=FactorizedJetCorrector(jetCorLevels,jetCorFiles) print 'Jet energy corrector initialized with levels ',jetCorLevels,' for ',prefix if prefix=='MC': jecUncertainty=JetCorrectionUncertainty(jecDir+"/"+prefix+"_Uncertainty_AK5PFchs.txt") print 'Jet uncertainty is ',jecUncertainty except Exception as e: print '[Error]',e tree=file.Get("smDataAnalyzer/data") nev = tree.GetEntries() outUrl=outputDir+'/'+os.path.basename(fileName) monitor=Monitor(outUrl) #same the initial normalization and cross section monitor.addValue(origEvents,'iniEvents') monitor.addValue(xsec,'crossSection') #some basic histograms monitor.addHisto('nvtx', ';Vertices;Events', 50,0,50) monitor.addHisto('nvtxraw', ';Vertices;Events', 50,0,50) monitor.addHisto('vmass', ';Mass [GeV];Events', 50,0,250) monitor.addHisto('vmt', ';Transverse mass [GeV];Events', 50,0,250) monitor.addHisto('vpt', ';Boson transverse momentum [GeV];Events',50,0,250) monitor.addHisto('leg1pt', ';Transverse momentum [GeV];Events', 50,0,250) monitor.addHisto('leg2pt', ';Transverse momentum [GeV];Events', 50,0,250) monitor.addHisto('leg1iso', ';Relative isolation;Events', 50,0,0.5) monitor.addHisto('leg2iso', ';Relative isolation;Events', 50,0,0.5) #save a summary ntuple for analysis summaryTuple=None if saveSummary : varList='cat:weight:nvtx:njets' varList=varList+':v_mass:v_mt:v_pt:genv_mass:genv_pt' varList=varList+':leg1_pt:leg1_eta:leg1_phi:genleg1_pt:leg1_relIso' varList=varList+':leg2_pt:leg2_eta:leg2_phi:genleg2_pt:leg2_relIso' varList=varList+':sumEt:ht' varList=varList+':met_lesup:met_lesdown:met_jesup:met_jesdown:met_jerup:met_jerdown:met_umetup:met_umetdown' summaryTuple=TNtuple('data','summary',varList) summaryTuple.SetDirectory(0) monitor.addObject(summaryTuple) #save a dedicated ntuple for Tag and Probe probesTuple=None probesId = array.array( 
'f', [ 0 ] ) probesPt = array.array( 'f', [ 0 ] ) probesEta = array.array( 'f', [ 0 ] ) probesPhi = array.array( 'f', [ 0 ] ) probesNvtx = array.array( 'f', [ 0 ] ) probesMass = array.array( 'f', [ 0 ] ) probesIsMatched = array.array( 'i', [0] ) probesPassLoose = array.array( 'i', [ 0 ] ) probesPassTight = array.array( 'i', [ 0 ] ) probesFireTrigger = array.array( 'i', [ 0 ] ) if saveProbes : probesTuple=TTree('tandp','summary for tandp') probesTuple.Branch( 'id', probesId, 'id/F' ) probesTuple.Branch( 'pt', probesPt, 'pt/F' ) probesTuple.Branch( 'eta', probesEta, 'eta/F' ) probesTuple.Branch( 'phi', probesPhi, 'phi/F' ) probesTuple.Branch( 'nvtx', probesNvtx, 'nvtx/F' ) probesTuple.Branch( 'mass', probesMass, 'mass/F' ) probesTuple.Branch( 'isMatched', probesIsMatched, 'isMatched/I' ) probesTuple.Branch( 'passLoose', probesPassLoose, 'passLoose/I' ) probesTuple.Branch( 'passTight', probesPassTight, 'passTight/I' ) probesTuple.Branch( 'fireTrigger', probesFireTrigger, 'fireTrigger/I' ) probesTuple.SetDirectory(0) monitor.addObject(probesTuple) # # LOOP OVER THE EVENTS # for iev in xrange(0,nev): tree.GetEntry(iev) if iev%10000 == 0 : sys.stdout.write("\r[ %d/100 ] completed" %(100.*iev/nev)) sys.stdout.flush() #check mc truth (select V bosons from the hard process genBosonP4=TLorentzVector(0,0,0,0) genNeutP4=TLorentzVector(0,0,0,0) for g in xrange(0,tree.mcn): if tree.mc_status[g]!=3 : continue genP4=TLorentzVector(tree.mc_px[g],tree.mc_py[g],tree.mc_pz[g],tree.mc_en[g]) if abs(tree.mc_id[g])==12 or abs(tree.mc_id[g])==14 or abs(tree.mc_id[g])==14 : genNeutP4=genNeutP4+genP4 if abs(tree.mc_id[g])!=23 and abs(tree.mc_id[g])!=24 : continue genBosonP4=genP4 #get triggers that fired eFire,mFire,emFire=decodeTriggerWord(tree.tbits) if eTriggersOnly : mFire=False if muTriggersOnly : eFire=False #select the leptons leptonCands=[] validTags=[] lepSums=[TLorentzVector(0,0,0,0)]*3 lepFlux=TLorentzVector(0,0,0,0) for l in xrange(0,tree.ln) : lep=LeptonCand(tree.ln_id[l],tree.ln_px[l],tree.ln_py[l],tree.ln_pz[l],tree.ln_en[l]) if lep.p4.Pt()<20 : continue if abs(tree.ln_id[l])==11 : if math.fabs(lep.p4.Eta())>2.5 : continue if math.fabs(lep.p4.Eta())>1.4442 and math.fabs(lep.p4.Eta())<1.566 : continue if abs(tree.ln_id[l])==13 : if math.fabs(lep.p4.Eta())>2.1 : continue relIso, isLoose, isLooseIso, isTight, isTightIso = selectLepton(tree.ln_id[l],tree.ln_idbits[l],tree.ln_gIso[l],tree.ln_chIso[l],tree.ln_nhIso[l],tree.ln_puchIso[l],lep.p4.Pt()) lep.selectionInfo(relIso,isLoose, isLooseIso, isTight, isTightIso) lep.triggerInfo(tree.ln_Tbits[l]) #check the generator level information genMatchIdx=tree.ln_genid[l] if genMatchIdx < tree.mcn : lep.genMatch(tree.mc_id[genMatchIdx],tree.mc_px[genMatchIdx],tree.mc_py[genMatchIdx],tree.mc_pz[genMatchIdx],tree.mc_en[genMatchIdx]) else : lep.genMatch(0,0,0,0,0) leptonCands.append(lep) if not saveProbes: continue if not isTight or not isTightIso or lep.Tbits==0 : continue if abs(lep.id)==11 and not eFire: continue if abs(lep.id)==13 and not mFire: continue validTags.append( len(leptonCands)-1 ) lepSums[1]=lepSums[1]+lep.getP4('lesup')-lep.p4 lepSums[2]=lepSums[2]+lep.getP4('lesdown')-lep.p4 lepFlux=lepFlux+lep.p4 #check if probes tree should be saved if saveProbes and len(validTags)>0: # choose a random tag tagIdx=random.choice(validTags) tag=leptonCands[tagIdx] #find probe probe=None for l in xrange(0,len(leptonCands)) : if l==tagIdx: continue if abs(tag.id)!=abs(leptonCands[l].id) : continue probe=leptonCands[l] break #for electrons save superclusters if probe 
is not found matchToEle=1 #if abs(tag.id)==11 and probe is None : # matchToEle=0 # for sc in xrange(0,tree.scn) : # sc_en=tree.scn_e[sc] # sc_eta=tree.scn_eta[sc] # sc_phi=tree.scn_phi[sc] # sc_pt=sc_en/math.cosh(sc_eta) # sc_p4=TLorentzVector(0,0,0,0) # sc_p4.SetPtEtaPhiE(sc_pt,sc_eta,sc_phi,sc_en) # lscp4=tag.p4+sc_p4 # if math.fabs(lscp4.M()-91)>30 : continue # scCand=LeptonCand(tag.id,sc_p4.Px(),sc_p4.Py(),sc_p4.Pz(),sc_p4.E()) # scCand.selectionInfo(0,0,0,0,0) # scCand.triggerInfo(0) # probe=scCand # break if abs(tag.id)==13 : matchToEle=0 #save info if probe is not None: tpp4=tag.p4+probe.p4 if math.fabs(tpp4.M()-91)<30 : probesId[0]=probe.id probesPt[0]=probe.p4.Pt() probesEta[0]=probe.p4.Eta() probesPhi[0]=probe.p4.Phi() probesNvtx[0]=tree.nvtx probesMass[0]=tpp4.M() probesIsMatched[0]=(probe.genId!=0) probesPassLoose[0]=(probe.passLoose and probe.passLooseIso) probesPassTight[0]=(probe.passTight and probe.passTightIso) probesFireTrigger[0]=(probe.Tbits>0) probesTuple.Fill() #jets selJets=[] jetSums=[TLorentzVector(0,0,0,0)]*5 jetFlux=TLorentzVector(0,0,0,0) ht=0 for j in xrange(0,tree.jn) : jet=JetCand(tree.jn_px[j],tree.jn_py[j],tree.jn_pz[j],tree.jn_en[j],tree.jn_area[j],tree.jn_torawsf[j]) #cross clean with loose isolated leptons overlapFound=False for l in leptonCands: if not l.passLoose or not l.passLooseIso : continue dR=jet.p4.DeltaR(l.p4) if dR>0.4 : continue overlapFound=True break if overlapFound: continue #very loose kinematics cuts if math.fabs(jet.p4.Eta())>4.7 or jet.p4.Pt()<10 : continue #save it jet.genMatch(tree.jn_genpx[j],tree.jn_py[j],tree.jn_pz[j],tree.jn_en[j],tree.jn_genid[j],tree.jn_genflav[j]) jet.updateJEC(jecCorrector,jecUncertainty,tree.rho,tree.nvtx) selJets.append(jet) #account for all the corrections you have applied jetSums[0]=jetSums[0] + jet.getCorrectedJet() - jet.getCorrectedJet('raw') jetSums[1]=jetSums[1] + jet.getCorrectedJet('jesup') - jet.getCorrectedJet() jetSums[2]=jetSums[2] + jet.getCorrectedJet('jesdown') - jet.getCorrectedJet() jetSums[3]=jetSums[3] + jet.getCorrectedJet('jerup') - jet.getCorrectedJet() jetSums[4]=jetSums[4] + jet.getCorrectedJet('jerdown') - jet.getCorrectedJet() jetFlux=jetFlux+jet.p4 ht=ht+jet.p4.Pt() # met metCand=METCand(tree.met_pt[0]*math.cos(tree.met_phi[0]),tree.met_pt[0]*math.sin(tree.met_phi[0]),0,tree.met_pt[0]) metCand.genMatch(genNeutP4.Px(),genNeutP4.Py(),genNeutP4.Pz(),genNeutP4.E()) metCand.addSumEts(tree.met_sumet[0], tree.met_chsumet[0]) metCand.addJetCorrections(jetSums) metCand.addLeptonCorrections(lepSums) unclFlux=-(metCand.p4+lepFlux+jetFlux) unclSums=[TLorentzVector(0,0,0,0),unclFlux*0.10,unclFlux*(-0.10)] metCand.addUnclusteredCorrections(unclSums) #build the candidate vCand=buildVcand(eFire,mFire,emFire,leptonCands,metCand) if vCand is None : continue #prepare to save weight=1.0 if puWeightsGr is not None: weight=puWeightsGr.Eval(tree.ngenITpu) #show isolations for ileg in [0,1]: hname='leg'+str(ileg+1)+'iso' lid='' if abs(vCand.m_legs[ileg].id)==11 : lid='e' elif abs(vCand.m_legs[ileg].id)==13 : lid='mu' else : continue monitor.fill(hname,[lid],vCand.m_legs[ileg].relIso,weight) tags=[vCand.tag] monitor.fill('nvtxraw',tags, tree.nvtx, 1.0) monitor.fill('nvtx', tags, tree.nvtx, weight) monitor.fill('vmass', tags, vCand.p4.M(), weight) monitor.fill('vpt', tags, vCand.p4.Pt(), weight) monitor.fill('leg1pt', tags, vCand.m_legs[0].p4.Pt(), weight) monitor.fill('leg2pt', tags, vCand.m_legs[1].p4.Pt(), weight) for var in 
['','lesup','lesdown','jesup','jesdown','jerup','jerdown','umetup','umetdown']: mtVar=vCand.computeMt(var) monitor.fill('vmt', [vCand.tag+var], mtVar, weight) if saveSummary : values=[ vCand.id, weight, tree.nvtx, len(selJets), vCand.p4.M(), vCand.mt, vCand.p4.Pt(), genBosonP4.M(), genBosonP4.Pt(), vCand.m_legs[0].p4.Pt(),vCand.m_legs[0].p4.Eta(),vCand.m_legs[0].p4.Phi(), vCand.m_legs[0].genP4.Pt(), vCand.m_legs[0].relIso, vCand.m_legs[1].p4.Pt(),vCand.m_legs[1].p4.Eta(),vCand.m_legs[1].p4.Phi(), vCand.m_legs[1].genP4.Pt(), vCand.m_legs[1].relIso, metCand.sumet, ht, metCand.p4Vars['lesup'].Pt(),metCand.p4Vars['lesdown'].Pt(),metCand.p4Vars['jesup'].Pt(),metCand.p4Vars['jesdown'].Pt(),metCand.p4Vars['jerup'].Pt(),metCand.p4Vars['jerdown'].Pt(),metCand.p4Vars['umetup'].Pt(),metCand.p4Vars['umetdown'].Pt() ] summaryTuple.Fill(array.array("f",values)) file.Close() monitor.close()
        # piPlus meson
        PiPlus[nPiPlus].SetPxPyPzE(float(word[2]), float(word[3]), float(word[4]), float(word[5]))
        nPiPlus += 1
    elif value == 9:
        # piMinus meson
        PiMinus.SetPxPyPzE(float(word[2]), float(word[3]), float(word[4]), float(word[5]))
    if value == 8 and nPiPlus == 2:
        Neutron = Beam + Target - (PiPlus[0] + PiPlus[1] + PiMinus)  # missing neutron vector
        n, pip1, pip2, pim = Neutron.Mag(), PiPlus[0].Mag(), PiPlus[1].Mag(), PiMinus.Mag()
        im1, im2, im3, im4, im5, im6, im7, im8 = n + pip1 + pip2 + pim, pip1 + pip2 + pim, pip1 + pim, pip2 + pim, pip1 + pip2, n + pip1, n + pip2, n + pim
        IMspectra.Fill(im1, im2, im3, im4, im5, im6, im7, im8)  # filling the Ntuple

rootFile.Write()  # saving the Ntuple in a root file

IMCanvas = TCanvas("cc", "Invariant mass spectra", 10, 10, 1000, 700)  # creating a 2x4 canvas
IMCanvas.Divide(2, 4)
IMCanvas.cd(1)  # navigation and filling of the canvas
IMspectra.Draw("npip1pip2pim")
IMCanvas.cd(2)
IMspectra.Draw("pip1pip2pim")
IMCanvas.cd(3)
IMspectra.Draw("pip1pim")
IMCanvas.cd(4)
IMspectra.Draw("pip2pim")
context = zmq.Context()
socket = context.socket(zmq.SUB)

print("Collecting updates from LTC2983 publisher")
socket.connect("tcp://usop01:5556")
socket.setsockopt(zmq.SUBSCRIBE, '')

print("Storing data on: " + filename)

nsamples = 0
while True:
    msg = socket.recv().split('\0')[0]
    fval = float(msg)
    ntuple.Fill(fval)
    ch2plot.Fill(fval)
    nsamples = nsamples + 1
    if (nsamples % 1000) == 0:
        f.Write()
    if (nsamples == 5000):
        print("Done !")
        f.Write()
        sys.exit(0)
    #ch2plot.Draw()
    #c1.Update()
readEvent = False
nPart = 0
nEvents = 0
for line in lines:
    if (line == "<mgrwt>\n"):
        readEvent = False
    if (readEvent):
        nPart += 1
        if (nPart == 1):
            continue
        varList = line.split()
        PID = int(varList[0])
        Px = float(varList[6])
        Py = float(varList[7])
        Pz = float(varList[8])
        E = float(varList[9])
        m = float(varList[10])
        if (nPart == 2):
            eleBeamNtuple.Fill(PID, Px, Py, Pz, E, m)
        if (nPart == 4):
            ApNtuple.Fill(PID, Px, Py, Pz, E, m)
        if (nPart == 5):
            RhoNtuple.Fill(PID, Px, Py, Pz, E, m)
        if (nPart == 6):
            eleRecoilNtuple.Fill(PID, Px, Py, Pz, E, m)
        if (nPart == 7):
            WRecoilNtuple.Fill(PID, Px, Py, Pz, E, m)
        if (nPart == 8):
            posDecayNtuple.Fill(PID, Px, Py, Pz, E, m)
        if (nPart == 9):
            eleDecayNtuple.Fill(PID, Px, Py, Pz, E, m)
        if (nPart == 10):
            PionNtuple.Fill(PID, Px, Py, Pz, E, m)
            nEvents += 1
            if (nEvents % 10000 == 0):
                print "Adding event number " + str(nEvents)
    if (line == "<event>\n"):
        readEvent = True
        nPart = 0

LHEfile.close()
rootfile.Write()
def main(): if len(sys.argv) < 3: print("Usage: ToyMC [numberEvents] [randomSeed]") return numberEvents = int(sys.argv[1]) seed = int(sys.argv[2]) print( "==================================== TRAIN ====================================" ) f = root_open( "legotrain_350_20161117-2106_LHCb4_fix_CF_pPb_MC_ptHardMerged.root", "read" ) hJetPt = f.Get("AliJJetJtTask/AliJJetJtHistManager/JetPt/JetPtNFin{:02d}".format(2)) hZ = f.Get("AliJJetJtTask/AliJJetJtHistManager/Z/ZNFin{:02d}".format(2)) FillFakes = False dummy_variable = True weight = True NBINS = 50 LimL = 0.1 LimH = 500 logBW = (TMath.Log(LimH) - TMath.Log(LimL)) / NBINS LogBinsX = [LimL * math.exp(ij * logBW) for ij in range(0, NBINS + 1)] hJetPtMeas = Hist(LogBinsX) hJetPtTrue = Hist(LogBinsX) myRandom = TRandom3(seed) fEff = TF1("fEff", "1-0.5*exp(-x)") jetBinBorders = [5, 10, 20, 30, 40, 60, 80, 100, 150, 500] hJetPtMeasCoarse = Hist(jetBinBorders) hJetPtTrueCoarse = Hist(jetBinBorders) NBINSJt = 64 low = 0.01 high = 10 BinW = (TMath.Log(high) - TMath.Log(low)) / NBINSJt LogBinsJt = [low * math.exp(i * BinW) for i in range(NBINSJt + 1)] hJtTrue = Hist(LogBinsJt) hJtMeas = Hist(LogBinsJt) hJtFake = Hist(LogBinsJt) LogBinsPt = jetBinBorders jetPtBins = [(a, b) for a, b in zip(jetBinBorders, jetBinBorders[1:])] hJtTrue2D = Hist2D(LogBinsJt, LogBinsPt) hJtMeas2D = Hist2D(LogBinsJt, LogBinsPt) hJtFake2D = Hist2D(LogBinsJt, LogBinsPt) hJtMeasBin = [Hist(LogBinsJt) for i in jetBinBorders] hJtTrueBin = [Hist(LogBinsJt) for i in jetBinBorders] response = RooUnfoldResponse(hJtMeas, hJtTrue) response2D = RooUnfoldResponse(hJtMeas2D, hJtTrue2D) responseBin = [RooUnfoldResponse(hJtMeas, hJtTrue) for i in jetBinBorders] responseJetPt = RooUnfoldResponse(hJetPtMeas, hJetPtTrue) responseJetPtCoarse = RooUnfoldResponse(hJetPtMeasCoarse, hJetPtTrueCoarse) # Histogram index is jet pT index, Bin 0 is 5-10 GeV # Histogram X axis is observed jT, Bin 0 is underflow # Histogram Y axis is observed jet Pt, Bin 0 is underflow # Histogram Z axis is True jT, Bin 0 is underflow responses = [Hist3D(LogBinsJt, LogBinsPt, LogBinsJt) for i in jetPtBins] misses = Hist2D(LogBinsJt, LogBinsPt) fakes2D = Hist2D(LogBinsJt, LogBinsPt) outFile = TFile("tuple.root", "recreate") responseTuple = TNtuple( "responseTuple", "responseTuple", "jtObs:ptObs:jtTrue:ptTrue" ) hMultiTrue = Hist(50, 0, 50) hMultiMeas = Hist(50, 0, 50) hZMeas = Hist(50, 0, 1) hZTrue = Hist(50, 0, 1) hZFake = Hist(50, 0, 1) responseMatrix = Hist2D(LogBinsJt, LogBinsJt) numberJets = 0 numberFakes = 0 numberJetsMeasBin = [0 for i in jetBinBorders] numberJetsTrueBin = [0 for i in jetBinBorders] numberFakesBin = [0 for i in jetBinBorders] ieout = numberEvents / 10 if ieout > 10000: ieout = 10000 fakeRate = 1 start_time = datetime.now() print("Processing Training Events") for ievt in range(numberEvents): tracksTrue = [] tracksMeas = [0 for x in range(100)] if ievt % ieout == 0 and ievt > 0: time_elapsed = datetime.now() - start_time time_left = timedelta( seconds=time_elapsed.total_seconds() * 1.0 * (numberEvents - ievt) / ievt ) print( "Event {} [{:.2f}%] Time Elapsed: {} ETA: {}".format( ievt, 100.0 * ievt / numberEvents, fmtDelta(time_elapsed), fmtDelta(time_left), ) ) jetTrue = TVector3(0, 0, 0) jetMeas = TVector3(0, 0, 0) jetPt = hJetPt.GetRandom() remainder = jetPt if jetPt < 5: continue nt = 0 nt_meas = 0 while remainder > 0: trackPt = hZ.GetRandom() * jetPt if trackPt < remainder: track = TVector3() remainder = remainder - trackPt else: trackPt = remainder remainder = -1 if trackPt > 0.15: 
track.SetPtEtaPhi( trackPt, myRandom.Gaus(0, 0.1), myRandom.Gaus(math.pi, 0.2) ) tracksTrue.append(track) jetTrue += track if fEff.Eval(trackPt) > myRandom.Uniform(0, 1): tracksMeas[nt] = 1 jetMeas += track nt_meas += 1 else: tracksMeas[nt] = 0 nt += 1 fakes = [] for it in range(fakeRate * 100): if myRandom.Uniform(0, 1) > 0.99: fake = TVector3() fake.SetPtEtaPhi( myRandom.Uniform(0.15, 1), myRandom.Gaus(0, 0.1), myRandom.Gaus(math.pi, 0.2), ) fakes.append(fake) jetMeas += fake hJetPtMeas.Fill(jetMeas.Pt()) hJetPtTrue.Fill(jetTrue.Pt()) responseJetPt.Fill(jetMeas.Pt(), jetTrue.Pt()) responseJetPtCoarse.Fill(jetMeas.Pt(), jetTrue.Pt()) hMultiTrue.Fill(nt) hMultiMeas.Fill(nt_meas) ij_meas = GetBin(jetBinBorders, jetMeas.Pt()) ij_true = GetBin(jetBinBorders, jetTrue.Pt()) if nt < 5 or nt_meas < 5: continue numberJets += 1 if ij_meas >= 0: numberJetsMeasBin[ij_meas] += 1 hJetPtMeasCoarse.Fill(jetMeas.Pt()) if ij_true >= 0: numberJetsTrueBin[ij_true] += 1 hJetPtTrueCoarse.Fill(jetTrue.Pt()) for track, it in zip(tracksTrue, range(100)): zTrue = (track * jetTrue.Unit()) / jetTrue.Mag() jtTrue = (track - scaleJet(jetTrue, zTrue)).Mag() hZTrue.Fill(zTrue) if ij_true >= 0: if weight: hJtTrue.Fill(jtTrue, 1.0 / jtTrue) hJtTrueBin[ij_true].Fill(jtTrue, 1.0 / jtTrue) hJtTrue2D.Fill(jtTrue, jetTrue.Pt(), 1.0 / jtTrue) else: hJtTrue.Fill(jtTrue) hJtTrueBin[ij_true].Fill(jtTrue) hJtTrue2D.Fill(jtTrue, jetTrue.Pt()) if ij_meas >= 0: if tracksMeas[it] == 1: zMeas = (track * jetMeas.Unit()) / jetMeas.Mag() jtMeas = (track - scaleJet(jetMeas, zMeas)).Mag() hZMeas.Fill(zMeas) if weight: hJtMeasBin[ij_meas].Fill(jtMeas, 1.0 / jtMeas) hJtMeas.Fill(jtMeas, 1.0 / jtMeas) hJtMeas2D.Fill(jtMeas, jetMeas.Pt(), 1.0 / jtMeas) else: hJtMeas.Fill(jtMeas) hJtMeasBin[ij_meas].Fill(jtMeas) hJtMeas2D.Fill(jtMeas, jetMeas.Pt()) response.Fill(jtMeas, jtTrue) responseBin[ij_true].Fill(jtMeas, jtTrue) response2D.Fill(jtMeas, jetMeas.Pt(), jtTrue, jetTrue.Pt()) responseMatrix.Fill(jtMeas, jtTrue) responses[ij_true].Fill(jtMeas, jetMeas.Pt(), jtTrue) responseTuple.Fill(jtMeas, jetMeas.Pt(), jtTrue, jetTrue.Pt()) else: response.Miss(jtTrue) responseBin[ij_true].Miss(jtTrue) response2D.Miss(jtTrue, jetTrue.Pt()) misses.Fill(jtTrue, jetTrue.Pt()) responseTuple.Fill(-1, -1, jtTrue, jetTrue.Pt()) if ij_meas >= 0: for fake in fakes: zFake = (fake * jetMeas.Unit()) / jetMeas.Mag() jtFake = (fake - scaleJet(jetMeas, zFake)).Mag() hZMeas.Fill(zFake) hZFake.Fill(zFake) if weight: hJtMeas.Fill(jtFake, 1.0 / jtFake) hJtMeasBin[ij_meas].Fill(jtFake, 1.0 / jtFake) hJtMeas2D.Fill(jtFake, jetMeas.Pt(), 1.0 / jtFake) hJtFake2D.Fill(jtFake, jetMeas.Pt(), 1.0 / jtFake) hJtFake.Fill(jtFake, 1.0 / jtFake) else: hJtMeas.Fill(jtFake) hJtMeasBin[ij_meas].Fill(jtFake) hJtMeas2D.Fill(jtFake, jetMeas.Pt()) hJtFake2D.Fill(jtFake, jetMeas.Pt()) hJtFake.Fill(jtFake) if FillFakes: response.Fake(jtFake) responseBin[ij_true].Fake(jtFake) response2D.Fake(jtFake, jetMeas.Pt()) fakes2D.Fill(jtFake, jetMeas.Pt()) responseTuple.Fill(jtFake, jetMeas.Pt(), -1, -1) numberFakes += 1 numberFakesBin[ij_true] += 1 response2Dtest = make2Dresponse( responses, jetPtBins, hJtMeas2D, hJtTrue2D, misses=misses, fakes=fakes2D ) if dummy_variable: hJetPtMeas.Reset() hJetPtTrue.Reset() hMultiTrue.Reset() hMultiMeas.Reset() hJetPtMeasCoarse.Reset() hJetPtTrueCoarse.Reset() hZTrue.Reset() hZMeas.Reset() hJtTrue.Reset() hJtTrue2D.Reset() hJtMeas.Reset() hJtMeas2D.Reset() hJtFake.Reset() hJtFake2D.Reset() for h, h2 in zip(hJtTrueBin, hJtMeasBin): h.Reset() h2.Reset() numberJetsMeasBin = 
[0 for i in jetBinBorders] numberJetsTrueBin = [0 for i in jetBinBorders] numberJets = 0 print("Create testing data") start_time = datetime.now() numberEvents = numberEvents / 2 for ievt in range(numberEvents): tracksTrue = [] tracksMeas = [0 for x in range(100)] if ievt % ieout == 0 and ievt > 0: time_elapsed = datetime.now() - start_time time_left = timedelta( seconds=time_elapsed.total_seconds() * 1.0 * (numberEvents - ievt) / ievt ) print( "Event {} [{:.2f}%] Time Elapsed: {} ETA: {}".format( ievt, 100.0 * ievt / numberEvents, fmtDelta(time_elapsed), fmtDelta(time_left), ) ) jetTrue = TVector3(0, 0, 0) jetMeas = TVector3(0, 0, 0) jetPt = hJetPt.GetRandom() remainder = jetPt if jetPt < 5: continue nt = 0 nt_meas = 0 while remainder > 0: trackPt = hZ.GetRandom() * jetPt if trackPt < remainder: track = TVector3() remainder = remainder - trackPt else: trackPt = remainder remainder = -1 if trackPt > 0.15: track.SetPtEtaPhi( trackPt, myRandom.Gaus(0, 0.1), myRandom.Gaus(math.pi, 0.2) ) tracksTrue.append(track) jetTrue += track if fEff.Eval(trackPt) > myRandom.Uniform(0, 1): tracksMeas[nt] = 1 jetMeas += track nt_meas += 1 else: tracksMeas[nt] = 0 nt += 1 fakes = [] for it in range(fakeRate * 100): if myRandom.Uniform(0, 1) > 0.99: fake = TVector3() fake.SetPtEtaPhi( myRandom.Uniform(0.15, 1), myRandom.Gaus(0, 0.1), myRandom.Gaus(math.pi, 0.2), ) fakes.append(fake) jetMeas += fake hJetPtMeas.Fill(jetMeas.Pt()) hJetPtTrue.Fill(jetTrue.Pt()) hMultiTrue.Fill(nt) hMultiMeas.Fill(nt_meas) ij_meas = GetBin(jetBinBorders, jetMeas.Pt()) ij_true = GetBin(jetBinBorders, jetTrue.Pt()) if nt < 5 or nt_meas < 5: continue numberJets += 1 if ij_meas >= 0: numberJetsMeasBin[ij_meas] += 1 hJetPtMeasCoarse.Fill(jetMeas.Pt()) if ij_true >= 0: numberJetsTrueBin[ij_true] += 1 hJetPtTrueCoarse.Fill(jetTrue.Pt()) for track, it in zip(tracksTrue, range(100)): zTrue = (track * jetTrue.Unit()) / jetTrue.Mag() jtTrue = (track - scaleJet(jetTrue, zTrue)).Mag() hZTrue.Fill(zTrue) if ij_true >= 0: if weight: hJtTrue.Fill(jtTrue, 1.0 / jtTrue) hJtTrueBin[ij_true].Fill(jtTrue, 1.0 / jtTrue) hJtTrue2D.Fill(jtTrue, jetTrue.Pt(), 1.0 / jtTrue) else: hJtTrue.Fill(jtTrue) hJtTrueBin[ij_true].Fill(jtTrue) hJtTrue2D.Fill(jtTrue, jetTrue.Pt()) if ij_meas >= 0: if tracksMeas[it] == 1: zMeas = (track * jetMeas.Unit()) / jetMeas.Mag() jtMeas = (track - scaleJet(jetMeas, zMeas)).Mag() hZMeas.Fill(zMeas) if weight: hJtMeasBin[ij_meas].Fill(jtMeas, 1.0 / jtMeas) hJtMeas.Fill(jtMeas, 1.0 / jtMeas) hJtMeas2D.Fill(jtMeas, jetMeas.Pt(), 1.0 / jtMeas) else: hJtMeas.Fill(jtMeas) hJtMeasBin[ij_meas].Fill(jtMeas) hJtMeas2D.Fill(jtMeas, jetMeas.Pt()) if ij_meas >= 0: for fake in fakes: zFake = (fake * jetMeas.Unit()) / jetMeas.Mag() jtFake = (fake - scaleJet(jetMeas, zFake)).Mag() hZMeas.Fill(zFake) hZFake.Fill(zFake) if weight: hJtMeas.Fill(jtFake, 1.0 / jtFake) hJtMeasBin[ij_meas].Fill(jtFake, 1.0 / jtFake) hJtMeas2D.Fill(jtFake, jetMeas.Pt(), 1.0 / jtFake) hJtFake2D.Fill(jtFake, jetMeas.Pt(), 1.0 / jtFake) hJtFake.Fill(jtFake, 1.0 / jtFake) else: hJtMeas.Fill(jtFake) hJtMeasBin[ij_meas].Fill(jtFake) hJtMeas2D.Fill(jtFake, jetMeas.Pt()) hJtFake2D.Fill(jtFake, jetMeas.Pt()) hJtFake.Fill(jtFake) time_elapsed = datetime.now() - start_time print( "Event {} [{:.2f}%] Time Elapsed: {}".format( numberEvents, 100.0, fmtDelta(time_elapsed) ) ) if not FillFakes: hJtMeas.Add(hJtFake, -1) hJtMeas2D.Add(hJtFake2D, -1) responseTuple.Print() outFile.Write() # printTuple(responseTuple) hJtMeasProjBin = [ makeHist(hJtMeas2D.ProjectionX("histMeas{}".format(i), 
i, i), bins=LogBinsJt) for i in range(1, len(jetBinBorders)) ] hJtMeasProj = makeHist(hJtMeas2D.ProjectionX("histMeas"), bins=LogBinsJt) hJtTrueProjBin = [ makeHist(hJtTrue2D.ProjectionX("histTrue{}".format(i), i, i), bins=LogBinsJt) for i in range(1, len(jetBinBorders)) ] hJtTrueProj = makeHist(hJtTrue2D.ProjectionX("histTrue"), bins=LogBinsJt) hJtFakeProjBin = [ makeHist(hJtFake2D.ProjectionX("histFake{}".format(i), i, i), bins=LogBinsJt) for i in range(1, len(jetBinBorders)) ] if not FillFakes: for h, h2 in zip(hJtMeasBin, hJtFakeProjBin): h.Add(h2, -1) for h in ( hJtMeasProj, hJtTrueProj, hJtMeas, hJtTrue, hJtFake, hZFake, hZMeas, hZTrue, ): h.Scale(1.0 / numberJets, "width") for meas, true, n_meas, n_true in zip( hJtMeasBin, hJtTrueBin, numberJetsMeasBin, numberJetsTrueBin ): if n_meas > 0: meas.Scale(1.0 / n_meas, "width") if n_true > 0: true.Scale(1.0 / n_true, "width") numberJetsMeasFromHist = [ hJetPtMeasCoarse.GetBinContent(i) for i in range(1, hJetPtMeasCoarse.GetNbinsX() + 1) ] numberJetsTrueFromHist = [ hJetPtTrueCoarse.GetBinContent(i) for i in range(1, hJetPtTrueCoarse.GetNbinsX() + 1) ] print("Total number of jets: {}".format(numberJets)) print("Total number of fakes: {}".format(numberFakes)) print("Measured jets by bin") print(numberJetsMeasBin) print(numberJetsMeasFromHist) print("True jets by bin") print(numberJetsTrueBin) print(numberJetsTrueFromHist) hRecoJetPtCoarse = unfoldJetPt(hJetPtMeasCoarse, responseJetPtCoarse, jetBinBorders) numberJetsFromReco = [ hRecoJetPtCoarse.GetBinContent(i) for i in range(1, hRecoJetPtCoarse.GetNbinsX()) ] print("Unfolded jet numbers by bin:") print(numberJetsFromReco) print("Fakes by bin") print(numberFakesBin) print( "==================================== UNFOLD ===================================" ) unfold = RooUnfoldBayes(response, hJtMeas, 4) # OR unfoldSVD = RooUnfoldSvd(response, hJtMeas, 20) # OR unfold2D = RooUnfoldBayes(response2D, hJtMeas2D, 4) for u in (unfold, unfoldSVD, unfold2D): u.SetVerbose(0) # response2Dtest = makeResponseFromTuple(responseTuple,hJtMeas2D,hJtTrue2D) unfold2Dtest = RooUnfoldBayes(response2Dtest, hJtMeas2D, 4) unfoldBin = [ RooUnfoldBayes(responseBin[i], hJtMeasBin[i]) for i in range(len(jetBinBorders)) ] for u in unfoldBin: u.SetVerbose(0) hRecoBayes = makeHist(unfold.Hreco(), bins=LogBinsJt) hRecoSVD = makeHist(unfoldSVD.Hreco(), bins=LogBinsJt) hRecoBin = [ makeHist(unfoldBin[i].Hreco(), bins=LogBinsJt) for i in range(len(jetBinBorders)) ] hReco2D = make2DHist(unfold2D.Hreco(), xbins=LogBinsJt, ybins=LogBinsPt) hReco2Dtest = make2DHist(unfold2Dtest.Hreco(), xbins=LogBinsJt, ybins=LogBinsPt) hRecoJetPt = unfoldJetPt(hJetPtMeas, responseJetPt, LogBinsX) hReco2DProjBin = [ makeHist(hReco2D.ProjectionX("histReco{}".format(i), i, i), bins=LogBinsJt) for i in range(1, len(jetBinBorders)) ] hReco2DTestProjBin = [ makeHist( hReco2Dtest.ProjectionX("histRecoTest{}".format(i), i, i), bins=LogBinsJt ) for i in range(1, len(jetBinBorders)) ] hReco2DProj = makeHist(hReco2D.ProjectionX("histReco"), bins=LogBinsJt) hReco2DProj.Scale(1.0 / numberJets, "width") for h, h2, n in zip(hReco2DProjBin, hReco2DTestProjBin, numberJetsFromReco): if n > 0: h.Scale(1.0 / n, "width") h2.Scale(1.0 / n, "width") # unfold.PrintTable (cout, hJtTrue) for h, h2, nj in zip(hJtMeasProjBin, hJtFakeProjBin, numberJetsMeasBin): if nj > 0: h.Scale(1.0 / nj, "width") h2.Scale(1.0 / nj, "width") # else: # print("nj is 0 for {}".format(h.GetName())) for h, nj in zip(hJtTrueProjBin, numberJetsTrueBin): if nj > 0: h.Scale(1.0 / nj, "width") # 
draw8grid(hJtMeasBin[1:],hJtTrueBin[1:],jetPtBins[1:],xlog = True,ylog = True,name="newfile.pdf",proj = hJtMeasProjBin[2:], unf2d = hReco2DProjBin[2:], unf=hRecoBin[1:]) if numberEvents > 1000: if numberEvents > 1000000: filename = "ToyMC_{}M_events.pdf".format(numberEvents / 1000000) else: filename = "ToyMC_{}k_events.pdf".format(numberEvents / 1000) else: filename = "ToyMC_{}_events.pdf".format(numberEvents) draw8gridcomparison( hJtMeasBin, hJtTrueBin, jetPtBins, xlog=True, ylog=True, name=filename, proj=None, unf2d=hReco2DProjBin, unf2dtest=hReco2DTestProjBin, unf=hRecoBin, fake=hJtFakeProjBin, start=1, stride=1, ) drawQA( hJtMeas, hJtTrue, hJtFake, hRecoBayes, hRecoSVD, hReco2DProj, hZ, hZTrue, hZMeas, hZFake, hMultiMeas, hMultiTrue, hJetPt, hJetPtTrue, hJetPtMeas, hRecoJetPt, responseMatrix, ) outFile.Close()
def main(): """ Creates a simple summary of the ROI for fast calibration. """ fIn = TFile.Open(FLAGS.noPUFile, 'READ') fOut = TFile(FLAGS.outpath, 'RECREATE') varnames = ['genen', 'geneta', 'genphi'] for i in range(1,NREG+1): varnames += ['en_sr{}_ROI'.format(i), 'noise_sr{}_ROI'.format(i)] for idet in range(1,NSUBDETS+1): varnames += ['en_sr{}_det{}'.format(i,idet), 'noise_sr{}_det{}'.format(i,idet)] for il in range(1,NLAYERS+1): varnames += ['en_sr{}_layer{}'.format(i,il), 'noise_sr{}_layer{}'.format(i,il)] output_tuples = TNtuple('summary','summary',':'.join(varnames)) tree = fIn.Get('an_mask/CEE_HEF_HEB') for t in tree: #define the ROIs roiList={} for ir in range(0,t.ROIs.size()): if t.ROIs[ir].pdgid() < 0 and t.ROIs[ir].pdgid() != -211: continue roiList[ir] = ROISummary(t.ROIs[ir].p4(), Nlayers=NLAYERS, PartType='Pion') for h in t.Hits: roiIdx = h.associatedROI() rid = t.ROIs[roiIdx].pdgid() roiKey = roiIdx if rid>0 or rid==-211 else abs(rid)-1 mipflag = True en = h.en(mipflag) subdet = h.subdet() layer = int(h.layerId()) #originally it is long isNoise = True if rid<0 and rid!=-211 else False regIdx = h.signalRegion() roiList[roiKey].AddHit(en=en, layer=layer, subdet=subdet, isNoise=isNoise, regIdx=regIdx) for r in roiList: varvals = [] genP4 = roiList[r].genP4 varvals += [genP4.E(),genP4.Eta(),genP4.Phi()] for ireg in range(1,NREG+1): recP4 = roiList[r].RecoP4(ireg) assert(np.isclose(recP4.E(), ( roiList[r].SubdetEnergyDeposited(ireg, 1) + roiList[r].SubdetEnergyDeposited(ireg, 2) + roiList[r].SubdetEnergyDeposited(ireg, 3) ) )) noiseROI = roiList[r].NoiseInROI(ireg) varvals += [recP4.E(),noiseROI] for idet in range(1,NSUBDETS+1): recEnSubdet = roiList[r].SubdetEnergyDeposited(ireg, idet) noiseSubdet = roiList[r].SubdetNoiseDeposited(ireg, idet) varvals += [recEnSubdet, noiseSubdet] for il in range(1,NLAYERS+1): recEn = roiList[r].RecoEnergyDeposited(ireg, il) noiseLayer = roiList[r].NoiseInLayer(ireg, il) varvals += [recEn, noiseLayer] output_tuples.Fill(array.array("f", varvals)) fOut.cd() fOut.Write() fOut.Close()
nEvBkg) else: hMassSB = GetSideBandHisto(hMassData, mean, sigma) B = GetExpectedBackgroundFromSB(hMassSB, mean, sigma, Nexp, nEvBkg) if inputCfg['PredForFprompt']['estimateFprompt']: fprompt = ComputeExpectedFprompt(PtMin[iPt], PtMax[iPt], effPrompt, \ hPredPrompt, effFD, hPredFD, RatioRaaFDPrompt) else: fprompt = inputCfg['fprompt'] S = GetExpectedSignal(PtMin[iPt] - PtMax[iPt], sigmaFONLL, Raa, Taa, effPrompt, Acc, fprompt, BR, fractoD, Nexp) array4Ntuple.append(PtMin[iPt]) array4Ntuple.append(PtMax[iPt]) array4Ntuple.append(S) array4Ntuple.append(B) array4Ntuple.append(S / math.sqrt(S + B)) array4Ntuple.append(S / B) array4Ntuple.append(effPrompt) array4Ntuple.append(effFD) array4Ntuple.append(fprompt) tSignif.Fill(array.array("f", array4Ntuple)) elapsed_time = time.time() - start_time print('total elapsed time: %f s' % elapsed_time) tSignif.Write() outfile.Close()
# Efficiency EffAccFDError = np.sqrt((effFDUnc / effFD)**2 + (preselEffFDUnc / preselEffFD)**2 + (accUnc / acc)**2) * effTimesAccFD EffAccPromptError = np.sqrt( (effPromptUnc / effPrompt)**2 + (preselEffPromptUnc / preselEffPrompt)**2 + (accUnc / acc)**2) * effTimesAccPrompt tupleForNtuple = cutSet + ( ptMin, ptMax, ParCutMin, ParCutMax, EffAccPromptError, EffAccFDError, errS, errExpBkg, errSignif, errSoverB, expSignif, expSoverB, expSignal, expBkg, effTimesAccPrompt, effTimesAccFD, fPrompt[0], fFD[0]) tSignif.Fill(np.array(tupleForNtuple, 'f')) estValues = { 'Signif': expSignif, 'SoverB': expSoverB, 'S': expSignal, 'B': expBkg, 'EffAccPrompt': effTimesAccPrompt, 'EffAccFD': effTimesAccFD, 'fPrompt': fPrompt[0], 'fFD': fFD[0] } estValuesErr = { 'SignifError': errSignif, 'SoverBError': errSoverB, 'SError': errS, 'BError': errExpBkg,
Nbin = (len(lineList))  # get number of bins

Line_string = str(lineList[0])
bin_init, _, _ = Line_string.split()
bin_init = float(bin_init)  # get initial bin

Line_string = str(lineList[len(lineList) - 1])
_, bin_final, _ = Line_string.split()
bin_final = float(bin_final)  # get final bin

f.seek(0)  # reset python read line

hist = TH1D("h1f", "h1f", Nbin, bin_init, bin_final)
ntuple = TNtuple("ntuple", "ntuple", "low_bin:high_bin:bin_contents")

total_e = 0
for i in range(1, Nbin + 1):
    Line_string = str(f.readline())
    ss, ff, bin_c = Line_string.split()
    ss = float(ss)
    ff = float(ff)
    bin_c = float(bin_c)
    hist.SetBinContent(i, bin_c)
    total_e = total_e + bin_c
    ntuple.Fill(ss, ff, bin_c)

#gStyle.SetOptStat()
hist.Draw()
gPad.Update()
can.Update()

wf = TFile("root_ntuple_from_txt.root", "RECREATE")
hist.Write()
ntuple.Write()
wf.Close()
def LeCroy2Root(directory, outputRootFile):
    masterFile = TFile(outputRootFile, "recreate")
    print("listing files...")
    filesPerChannel = listFilesPerChannel(directory)

    ## the missing channels are filled with 0:
    print len(filesPerChannel)
    nMC = len(filesPerChannel[0])  # number of available measurements per channel
    for i in range(4 - len(filesPerChannel)):
        filesPerChannel.append([0] * nMC)

    #### Get time from CHANNEL n #######################
    TimeNch = 1
    ########################################

    nMC = len(filesPerChannel[0])  # number of available measurements per channel

    # build the tuple with the required columns:
    # measuresLabels = "event:time:C1:C2:C3:C4"
    measuresLabels = "event:time:C1:C2:C3"
    tup = TNtuple("osc", "LeCroy Readings", measuresLabels)

    readingGroup = []
    print("loading data..." + str(nMC))
    # for each frame
    for i in range(nMC):
        # get each channel
        for j in range(len(filesPerChannel)):
            if (filesPerChannel[j][i] == 0):
                readingGroup.append([])
                # print "channel: " + str(j) + " empty"
            else:
                # print "channel: " + str(j) + " ok"
                # print directory + filesPerChannel[j][i]
                readingGroup.append(readTrc(directory + filesPerChannel[j][i]))

        largos = []
        for j in range(len(readingGroup)):
            if (not (len(readingGroup[j]) in largos)):
                if (len(readingGroup[j]) != 0):
                    largos.append(len(readingGroup[j][0]))
        n_data = max(largos)

        for j in range(len(readingGroup)):
            if (len(readingGroup[j]) == 0):
                x_data = [0] * n_data
                y_data = [0] * n_data
                d_data = [0] * n_data
                readingGroup[j] = [x_data, y_data, d_data]

        # then, build the lists to load into the tuple
        event = i
        for [time, c1, c2, c3] in zip(readingGroup[TimeNch - 1][0], readingGroup[0][1],
                                      readingGroup[1][1], readingGroup[2][1]):
            tup.Fill(event, time, c1, c2, c3)
        # for [time,c1,c2,c3,c4] in zip(readingGroup[TimeNch-1][0], readingGroup[0][1], readingGroup[1][1], readingGroup[2][1], readingGroup[3][1]):
        #     tup.Fill(event, time, c1, c2, c3, c4)
        # for [time,c1] in zip(readingGroup[0][0], readingGroup[0][1]):
        #     tup.Fill(event, time, c1)
        # for [time,c2,c3] in zip(readingGroup[0][0], readingGroup[0][1], ):
        #     tup.Fill(event, time, c2, c3)

        readingGroup = []
        if (int((float(i * 100) / nMC) * 100) % 100 == 0):
            sys.stdout.write(str(float(i * 100) / nMC) + "%" + "\r")
            sys.stdout.flush()

    tup.Write("", tup.kOverwrite)
    masterFile.Close()
    sys.stdout.write("\n")
random = TRandom3()
kUPDATE = 1000
for i in xrange(50000):
    # Generate random numbers
    # px, py = random.gauss(0, 1), random.gauss(0, 1)
    px, py = random.Gaus(0, 1), random.Gaus(0, 1)
    pz = px * px + py * py
    # rnd = random.random()
    rnd = random.Rndm(1)

    # Fill histograms
    hpx.Fill(px)
    hpxpy.Fill(px, py)
    hprof.Fill(px, pz)
    if not _reflex:
        ntuple.Fill(px, py, pz, rnd, i)

    # Update display every kUPDATE events
    if i and i % kUPDATE == 0:
        if i == kUPDATE:
            hpx.Draw()
        c1.Modified(True)
        c1.Update()
        if gSystem.ProcessEvents():  # allow user interrupt
            break

gBenchmark.Show('hsimple')

# Save all objects in this file
rannor, rndm = gRandom.Rannor, gRandom.Rndm

_px = array('d', [0])
_py = array('d', [0])

for i in range(25000):
    # Generate random values.
    rannor(_px, _py)
    px, py = _px[0], _py[0]
    pz = px * px + py * py
    random = rndm(1)

    # Fill histograms.
    hpx.Fill(px)
    hpxpy.Fill(px, py)
    hprof.Fill(px, pz)
    ntuple.Fill(px, py, pz, random, i)

c1 = gROOT.FindObject('c1')
if c1:
    c1 = 0
c1 = TCanvas('c1', 'Histogram Example', 200, 10, 700, 500)
c1.SetFillColor(42)
c1.GetFrame().SetFillColor(21)
c1.GetFrame().SetBorderSize(6)
c1.GetFrame().SetBorderMode(-1)

hpx.SetFillColor(48)
hpx.Draw()
c1.Modified()
c1.Update()

gROOT.GetListOfCanvases().Draw()
"""
Paul Eugenio
PHZ4151C
Florida State University
April 2, 2019
"""
from __future__ import division, print_function

import numpy as np
from ROOT import TLorentzVector, TNtuple, TCanvas, TFile

rootFile = TFile("ntp.root", "RECREATE")


def f(x):
    return x**2


squares = TNtuple("sqntuple", "Squares", "x:x2")
sqCanvas = TCanvas("cc", "squares", 10, 10, 800, 600)
sqCanvas.Divide(1, 2)

for k in range(1, 10, 1):
    squares.Fill(k, f(k))

sqCanvas.cd(1)  # select the first pad
squares.Draw("x")
sqCanvas.cd(2)
squares.Draw("x2")

rootFile.Write()
def get_pos(event):
    # Each detector is a 'collection', the No. of Elements are the hits.
    a = TNtuple("a", "a", "x:y:z")  # creates ntuple to store the values of x y z
    b = TNtuple("b", "b", "x:y:z")
    c = TNtuple("c", "c", "x:y:z")
    d = TNtuple("d", "d", "x:y:z")
    e = TNtuple("e", "e", "x:y:z")
    f = TNtuple("f", "f", "x:y:z")
    g = TNtuple("g", "g", "x:y:z")
    h = TNtuple("h", "h", "x:y:z")
    i = TNtuple("i", "i", "x:y:z")
    j = TNtuple("j", "j", "x:y:z")
    k = TNtuple("k", "k", "x:y:z")
    l = TNtuple("l", "l", "x:y:z")

    ECALB = event.getCollection("EcalBarrelHits")
    for ding in ECALB:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        a.Fill(x, y, z)

    ECALE = event.getCollection("EcalEndcapHits")
    for ding in ECALE:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        b.Fill(x, y, z)

    HCALB = event.getCollection("HcalBarrelHits")
    for ding in HCALB:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        c.Fill(x, y, z)

    HCALE = event.getCollection("HcalEndcapHits")
    for ding in HCALE:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        d.Fill(x, y, z)

    LUMICAL = event.getCollection("LumiCalHits")
    for ding in LUMICAL:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        e.Fill(x, y, z)

    MUONB = event.getCollection("MuonBarrelHits")
    for ding in MUONB:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        f.Fill(x, y, z)

    MUONE = event.getCollection("MuonEndcapHits")
    for ding in MUONE:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        g.Fill(x, y, z)

    SITB = event.getCollection("SiTrackerBarrelHits")
    for ding in SITB:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        h.Fill(x, y, z)

    SITE = event.getCollection("SiTrackerEndcapHits")
    for ding in SITE:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        i.Fill(x, y, z)

    SITF = event.getCollection("SiTrackerForwardHits")
    for ding in SITF:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        j.Fill(x, y, z)

    SIVB = event.getCollection("SiVertexBarrelHits")
    for ding in SIVB:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        k.Fill(x, y, z)

    SIVE = event.getCollection("SiVertexEndcapHits")
    for ding in SIVE:
        pos = ding.getPosition()
        x = pos[0]
        y = pos[1]
        z = pos[2]
        l.Fill(x, y, z)

    return a, b, c, d, e, f, g, h, i, j, k, l
##
## \macro_output
## \macro_code
##
## \author Wim Lavrijsen

import sys, os
from ROOT import TFile, TNtuple, TROOT

ifn = os.path.join(str(TROOT.GetTutorialDir()), 'pyroot', 'aptuple.txt')
ofn = 'aptuple.root'

print('opening file %s ...' % ifn)
infile = open(ifn, 'r')
lines = infile.readlines()
title = lines[0]
labels = lines[1].split()

print('writing file %s ...' % ofn)
outfile = TFile(ofn, 'RECREATE', 'ROOT file with an NTuple')
ntuple = TNtuple('ntuple', title, ':'.join(labels))

for line in lines[2:]:
    words = line.split()
    row = map(float, words)
    ntuple.Fill(*row)

outfile.Write()
print('done')