# print TrackerHits, len(RecoTracks)

#get track efficiency
eff = hf.GetTrackEfficiency(stripTolerance[0] * pitch, XY, mXmY, RecoTracks,
                            TrackerZ[0:2], pitch)
trackEfficiency.append(100 * eff)

#calculate track ambiguity rate
nFakeTracks = len(RecoTracks) - (nProton * eff)
if len(RecoTracks) > 0:
    trackAmbiguity.append(100 * (len(RecoTracks) - nProton) / len(RecoTracks))
    correctedTrackAmbiguity.append(100 * nFakeTracks / len(RecoTracks))

if saveStripMaps:
    hf.DrawTrackMap("TrackMap", RecoTracks, XY, xmax)

################### REAR TRACKERS #######################################################
phantomdepth = 30.0  #cm
energy = 230.0  #MeV
XY_MS, mXmY_MS = hf.ApplyMultipleScattering(XY, mXmY, phantomdepth, energy,
                                            (ZMeans[2] + ZMeans[1]) / 2)
#XY_MS, mXmY_MS = XY, mXmY

#reconstruct hits for each tracker module
TrackerHits = []
MaxNStrips = []
for module in [2, 3, 4]:
    #convert simulated protons to strips
nTracks.append(len(RecoTracks))

#get track efficiency
eff = hf.GetTrackEfficiency(stripTolerance * pitch, XY, mXmY, RecoTracks,
                            TrackerZ, pitch)
trackEfficiency.append(100 * eff)

#calculate track ambiguity rate
nFakeTracks = len(RecoTracks) - (nProton * eff)
if len(RecoTracks) > 0:
    trackAmbiguity.append(100 * (len(RecoTracks) - nProton) / len(RecoTracks))
    correctedTrackAmbiguity.append(100 * nFakeTracks / len(RecoTracks))

if saveStripMaps and len(TrackerAngles) > 1:
    hf.DrawTrackMap("TrackMap", RecoTracks, XY, xmax)

#summarise per-module and combined tracking performance
for module in range(len(TrackerAngles)):
    print "Module ", module, ": Hits= ", sum(nTrackerHits[module]), \
        " Ambiguity= ", np.mean(ambiguity[module]), "%", \
        " Efficiency=", np.mean(trackerEffs[module]), \
        "Purity= ", 100 - np.mean(correctedAmbiguity[module]), "%"

print "Combined: Tracks= ", sum(nTracks), \
    " Ambiguity= ", np.mean(trackAmbiguity), "%", \
    " Efficiency=", np.mean(trackEfficiency), \
    "Purity= ", 100 - np.mean(correctedTrackAmbiguity), "%"
print "Errors: Eff. Error=", np.std(trackEfficiency) / math.sqrt(NLoops), \
    "Purity Err", np.std(correctedTrackAmbiguity) / math.sqrt(NLoops), "%"
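
#For reference: a minimal, stand-alone restatement of the figures of merit printed
#above, using the same definitions as the per-loop code (ambiguity, corrected ambiguity
#via nFakeTracks, and purity = 100 - corrected ambiguity). The function name and
#signature are illustrative only and are not part of hf.
def tracking_figures_of_merit(nReco, nProton, eff):
    """Return (ambiguity %, corrected ambiguity %, purity %) for a single loop.

    nReco   -- number of reconstructed tracks, i.e. len(RecoTracks)
    nProton -- number of simulated protons in the loop
    eff     -- matching efficiency returned by hf.GetTrackEfficiency (0 to 1)
    """
    if nReco == 0:
        #the per-loop code above simply skips this case; return neutral values here
        return 0.0, 0.0, 100.0
    ambiguity = 100.0 * (nReco - nProton) / nReco  #raw excess of reconstructed tracks
    nFake = nReco - nProton * eff                  #tracks not matched to a true proton
    correctedAmbiguity = 100.0 * nFake / nReco
    purity = 100.0 - correctedAmbiguity
    return ambiguity, correctedAmbiguity, purity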
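
#Illustrative sketch only: hf.ApplyMultipleScattering (used in the REAR TRACKERS block
#above) is defined in the helper module and its exact model is not shown in this file.
#A common choice for a water phantom is Gaussian smearing with the PDG Highland formula,
#so an assumed stand-in could look like the functions below. theta0_highland and
#apply_scattering_sketch are hypothetical names, depth and positions are taken to be in
#cm, and the sketch reuses the math and numpy (np) modules already imported by this
#script.
def theta0_highland(depth_cm, kinetic_MeV, X0_cm=36.08, mass_MeV=938.272):
    """RMS projected scattering angle (rad) for a proton crossing depth_cm of water."""
    E = kinetic_MeV + mass_MeV                   #total energy in MeV
    p = math.sqrt(E * E - mass_MeV * mass_MeV)   #momentum in MeV/c
    beta = p / E
    t = depth_cm / X0_cm                         #thickness in radiation lengths
    return (13.6 / (beta * p)) * math.sqrt(t) * (1.0 + 0.038 * math.log(t))

def apply_scattering_sketch(xy, mxy, depth_cm, kinetic_MeV):
    """Smear positions and direction slopes with independent Gaussian MCS terms."""
    theta0 = theta0_highland(depth_cm, kinetic_MeV)
    xy = np.asarray(xy, dtype=float)
    mxy = np.asarray(mxy, dtype=float)
    #PDG plane approximations: angular rms ~ theta0, lateral rms ~ depth*theta0/sqrt(3);
    #the correlation between displacement and exit angle is neglected in this sketch
    xy_out = xy + np.random.normal(0.0, depth_cm * theta0 / math.sqrt(3.0), xy.shape)
    mxy_out = mxy + np.random.normal(0.0, theta0, mxy.shape)
    return xy_out, mxy_out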