def get_fluxes_and_names(fallback_to_ic3_labels_flux=False):
    flux_models = []
    for obj in dir(fluxes):
        cls = getattr(fluxes, obj)
        if isclass(cls):
            if issubclass(cls, fluxes.CompiledFlux) and \
                    cls != fluxes.CompiledFlux:

                # Try to use flux from icecube.weighting.fluxes
                try:
                    flux_model = MIMIC_NEUTRINOFLUX(cls(), obj)
                    flux_model.getFlux(1e4, 14, 0.)

                # Fall back to ic3_labels version
                # (currently necessary for python >=3.8)
                except Exception as e:
                    # try to obtain flux from ic3_labels
                    if fallback_to_ic3_labels_flux:
                        log_warn('Caught error: ' + str(e))
                        log_warn(
                            'Falling back to ic3_labels flux {}'.format(obj))
                        cls = getattr(_fluxes, obj)
                        flux_model = MIMIC_NEUTRINOFLUX(cls(), obj)

                    # if not falling back on ic3_labels fluxes: raise error
                    else:
                        raise e

                flux_models.append(flux_model)

    return flux_models, \
        [str(flux_model_i) + 'Weight' for flux_model_i in flux_models]
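# Hedged usage sketch (an assumption, not part of the original module): how
# get_fluxes_and_names might be called. The flux-model interface
# (getFlux(energy, particle_type, cos_zenith)) is taken from the call inside
# the function above; the helper name below is hypothetical.
def print_flux_values_at_10tev():
    flux_models, weight_names = get_fluxes_and_names(
        fallback_to_ic3_labels_flux=True)
    for flux_model, name in zip(flux_models, weight_names):
        # evaluate each flux at 10 TeV for a NuMu (PDG code 14) at cos(zenith) = 0
        print(name, flux_model.getFlux(1e4, 14, 0.))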
def _create_in_array(self, frame):
    if self.EnergyRecoName:
        En = np.log10(frame[self.EnergyRecoName].energy)
    elif self.LaputopParamsName:
        En = np.log10(frame[self.LaputopParamsName].s125)
    else:
        log_fatal('One of EnergyRecoName_I3Particle or LaputopParamsName '
                  'needs to be given')
    ze = np.cos(frame[self.AngularRecoName].dir.zenith)

    hits = frame[self.HitsName]
    unhits = frame[self.UnhitsName]
    excluded = frame[self.ExcludedName]

    # hits_t, hits_q, hits_r = np.array(
    #     [[signed_log(hit.time_residual), np.log10(hit.charge),
    #       log_plus_one(hit.distance)] for hit in hits]).T
    hits_t = signed_log(np.array([hit.time_residual for hit in hits]))
    hits_q = np.log10(np.array([hit.charge for hit in hits]))
    hits_r = log_plus_one(np.array([hit.distance for hit in hits]))
    hits_E = np.ones_like(hits_r) * En
    hits_z = np.ones_like(hits_r) * ze

    # unhits_t, unhits_q, unhits_r = np.array(
    #     [[signed_log(hit.time_residual), np.log10(hit.charge),
    #       log_plus_one(hit.distance)] for hit in unhits]).T
    unhits_t = signed_log(np.array([hit.time_residual for hit in unhits]))
    unhits_q = np.log10(np.array([hit.charge for hit in unhits]))
    unhits_r = log_plus_one(np.array([hit.distance for hit in unhits]))
    unhits_E = np.ones_like(unhits_r) * En
    unhits_z = np.ones_like(unhits_r) * ze

    # excluded_t, excluded_q, excluded_r = np.array(
    #     [[signed_log(hit.time_residual), np.log10(hit.charge),
    #       log_plus_one(hit.distance)] for hit in excluded]).T
    excluded_t = signed_log(
        np.array([hit.time_residual for hit in excluded]))
    excluded_q = np.log10(np.array([hit.charge for hit in excluded]))
    excluded_r = log_plus_one(
        np.array([hit.distance for hit in excluded]))
    excluded_E = np.ones_like(excluded_r) * En
    excluded_z = np.ones_like(excluded_r) * ze

    # ready data for entry to 5D hist
    t = np.concatenate((hits_t, unhits_t, excluded_t))
    q = np.concatenate((hits_q, unhits_q, excluded_q))
    r = np.concatenate((hits_r, unhits_r, excluded_r))
    E = np.concatenate((hits_E, unhits_E, excluded_E))
    z = np.concatenate((hits_z, unhits_z, excluded_z))

    if len(t) != 162 or len(q) != 162 or len(r) != 162:
        print('N_t %s N_q %s N_r %s' % (len(t), len(q), len(r)))
        log_fatal('Total Tanks in Event not 162')
    if np.isnan(t).any() or np.isnan(q).any() or np.isnan(r).any():
        print('t', t)
        print('q', q)
        print('r', r)
        log_warn('signed_time/logq/logr have nans')

    in_array = np.vstack([E, z, q, t, r]).T
    return in_array
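# Hedged sketch (an assumption, not part of the original module): the
# `signed_log` and `log_plus_one` helpers used above are defined elsewhere.
# A minimal implementation consistent with how they are used (finite output
# for zero or negative time residuals, log-compressed distances) could look
# like this; the exact definitions in the repository may differ.
import numpy as np


def signed_log(x):
    """Assumed: symmetric log transform that preserves the sign of x."""
    x = np.asarray(x, dtype=float)
    return np.sign(x) * np.log10(1. + np.abs(x))


def log_plus_one(x):
    """Assumed: log10(x + 1), finite at x = 0."""
    return np.log10(np.asarray(x, dtype=float) + 1.)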
def __init__(self, particle_type, energy,
             density=0.9216*(I3Units.g/I3Units.cm3)):

    # initialize variables
    self.a = 0.
    self.b = 0.
    self.emScale = 1.
    self.emScaleSigma = 0.

    # protect against extremely low energies
    # NB: while Equation 4.11 of Leif's master's thesis is written in terms
    # of log10, we use the natural log here and divide the energy-scaling
    # coefficients (beta) below by ln(10) to compensate
    self.logE = max(0., np.log(energy))
    self.Lrad = 0.358*(I3Units.g/I3Units.cm3)/density

    self.isElectron = particle_type in [
        dataclasses.I3Particle.ParticleType.EMinus,
        dataclasses.I3Particle.ParticleType.EPlus,
        dataclasses.I3Particle.ParticleType.Brems,
        dataclasses.I3Particle.ParticleType.DeltaE,
        dataclasses.I3Particle.ParticleType.PairProd,
        dataclasses.I3Particle.ParticleType.Gamma,
        # Pi0 decays to 2 gammas and produces EM showers
        dataclasses.I3Particle.ParticleType.Pi0,
    ]

    self.isHadron = particle_type in [
        dataclasses.I3Particle.ParticleType.Hadrons,
        dataclasses.I3Particle.ParticleType.Neutron,
        dataclasses.I3Particle.ParticleType.PiPlus,
        dataclasses.I3Particle.ParticleType.PiMinus,
        dataclasses.I3Particle.ParticleType.K0_Long,
        dataclasses.I3Particle.ParticleType.KPlus,
        dataclasses.I3Particle.ParticleType.KMinus,
        dataclasses.I3Particle.ParticleType.PPlus,
        dataclasses.I3Particle.ParticleType.PMinus,
        dataclasses.I3Particle.ParticleType.K0_Short,
        dataclasses.I3Particle.ParticleType.Eta,
        dataclasses.I3Particle.ParticleType.Lambda,
        dataclasses.I3Particle.ParticleType.SigmaPlus,
        dataclasses.I3Particle.ParticleType.Sigma0,
        dataclasses.I3Particle.ParticleType.SigmaMinus,
        dataclasses.I3Particle.ParticleType.Xi0,
        dataclasses.I3Particle.ParticleType.XiMinus,
        dataclasses.I3Particle.ParticleType.OmegaMinus,
        dataclasses.I3Particle.ParticleType.NeutronBar,
        dataclasses.I3Particle.ParticleType.LambdaBar,
        dataclasses.I3Particle.ParticleType.SigmaMinusBar,
        dataclasses.I3Particle.ParticleType.Sigma0Bar,
        dataclasses.I3Particle.ParticleType.SigmaPlusBar,
        dataclasses.I3Particle.ParticleType.Xi0Bar,
        dataclasses.I3Particle.ParticleType.XiPlusBar,
        dataclasses.I3Particle.ParticleType.OmegaPlusBar,
        dataclasses.I3Particle.ParticleType.DPlus,
        dataclasses.I3Particle.ParticleType.DMinus,
        dataclasses.I3Particle.ParticleType.D0,
        dataclasses.I3Particle.ParticleType.D0Bar,
        dataclasses.I3Particle.ParticleType.DsPlus,
        dataclasses.I3Particle.ParticleType.DsMinusBar,
        dataclasses.I3Particle.ParticleType.LambdacPlus,
        dataclasses.I3Particle.ParticleType.WPlus,
        dataclasses.I3Particle.ParticleType.WMinus,
        dataclasses.I3Particle.ParticleType.Z0,
        dataclasses.I3Particle.ParticleType.NuclInt,
    ]

    self.isMuon = particle_type in [
        dataclasses.I3Particle.ParticleType.MuMinus,
        dataclasses.I3Particle.ParticleType.MuPlus,
    ]

    self.isTau = particle_type in [
        dataclasses.I3Particle.ParticleType.TauMinus,
        dataclasses.I3Particle.ParticleType.TauPlus,
    ]

    if ((not self.isHadron) and (not self.isElectron)
            and (not self.isMuon) and (not self.isTau)):
        # if we don't know it but it has a pdg code,
        # it is probably a hadron..
        self.isHadron = True

        # Added safety check: throw error in this case to make sure nothing
        # weird is happening unknowingly
        # raise ValueError('Unknown particle type {!r}'.format(particle_type))
        log_warn(
            'Unknown particle type {!r}. Assuming this is a hadron!'.format(
                particle_type)
        )
    if self.isElectron:
        if particle_type == dataclasses.I3Particle.ParticleType.EPlus:
            self.a = 2.00035 + 0.63190*self.logE
            self.b = self.Lrad/0.63008
        elif particle_type in [
                dataclasses.I3Particle.ParticleType.Gamma,
                dataclasses.I3Particle.ParticleType.Pi0,
                ]:  # gamma, pi0
            self.a = 2.83923 + 0.58209*self.logE
            self.b = self.Lrad/0.64526
        else:
            self.a = 2.01849 + 0.63176*self.logE
            self.b = self.Lrad/0.63207

    elif self.isHadron:
        self.E0 = 0.
        self.m = 0.
        self.f0 = 1.
        self.rms0 = 0.
        self.gamma = 0.

        if particle_type == dataclasses.I3Particle.ParticleType.PiMinus:
            self.a = 1.69176636 + 0.40803489*self.logE
            self.b = self.Lrad/0.34108075
            self.E0 = 0.19826506
            self.m = 0.16218006
            self.f0 = 0.31859323
            self.rms0 = 0.94033488
            self.gamma = 1.35070162

        elif particle_type == dataclasses.I3Particle.ParticleType.K0_Long:
            self.a = 1.95948974 + 0.34934666*self.logE
            self.b = self.Lrad/0.34535151
            self.E0 = 0.21687243
            self.m = 0.16861530
            self.f0 = 0.27724987
            self.rms0 = 1.00318874
            self.gamma = 1.37528605

        elif particle_type == dataclasses.I3Particle.ParticleType.PPlus:
            self.a = 1.47495778 + 0.40450398*self.logE
            self.b = self.Lrad/0.35226706
            self.E0 = 0.29579368
            self.m = 0.19373018
            self.f0 = 0.02455403
            self.rms0 = 1.01619344
            self.gamma = 1.45477346

        elif particle_type == dataclasses.I3Particle.ParticleType.Neutron:
            self.a = 1.57739060 + 0.40631102*self.logE
            self.b = self.Lrad/0.35269455
            self.E0 = 0.66725124
            self.m = 0.19263595
            self.f0 = 0.17559033
            self.rms0 = 1.01414337
            self.gamma = 1.45086895

        elif particle_type == dataclasses.I3Particle.ParticleType.PMinus:
            self.a = 1.92249171 + 0.33701751*self.logE
            self.b = self.Lrad/0.34969748
            self.E0 = 0.29579368
            self.m = 0.19373018
            self.f0 = 0.02455403
            self.rms0 = 1.01094637
            self.gamma = 1.50438415

        else:
            self.a = 1.58357292 + 0.41886807*self.logE
            self.b = self.Lrad/0.33833116
            self.E0 = 0.18791678
            self.m = 0.16267529
            self.f0 = 0.30974123
            self.rms0 = 0.95899551
            self.gamma = 1.35589541

        e = max(2.71828183, energy)
        self.emScale = 1. - pow(e/self.E0, -self.m)*(1. - self.f0)
        self.emScaleSigma = \
            self.emScale*self.rms0*pow(np.log(e), -self.gamma)

    else:
        raise ValueError('Particle type {!r} is not a shower'.format(
            particle_type))

    if energy < 1.*I3Units.GeV:
        self.b = 0.  # this sets the cascade length to 0
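# Hedged sketch (an assumption, not part of the original class): the `a` and
# `b` computed above are commonly interpreted as the shape and scale of a
# gamma-distributed longitudinal shower profile, dE/dz ~ z**(a-1) * exp(-z/b).
# Under that assumption, the profile could be evaluated like this (requires
# b > 0; the low-energy case above sets b = 0, i.e. a point-like cascade):
import numpy as np
from scipy.stats import gamma as gamma_dist


def longitudinal_profile(z, a, b, total_energy=1.0):
    """Assumed energy deposition per unit length at depth z along the shower axis."""
    return total_energy * gamma_dist.pdf(np.asarray(z, dtype=float), a, scale=b)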
def Physics(self, frame):
    # Load reconstructed quantities
    laputop = frame[self.track]
    coszen = np.cos(laputop.dir.zenith)
    Par = LaputopParameter
    params = I3LaputopParams.from_frame(frame, self.track + 'Params')
    logs125 = params.value(Par.Log10_S125)

    # Locate the Zen/S125 bin to use the appropriate 2D PDF
    for zen_high, zen_low in zenith_bins:
        if coszen < zen_high and coszen >= zen_low:
            break
        zen_high = None

    # if not self.highEbins:
    if not self.highE:
        s125_bins = np.array(
            list(zip(np.arange(-0.5, 1, 0.1), np.arange(-0.4, 1.1, 0.1))))
    else:
        s125_bins = np.array(
            list(zip(np.arange(-0.5, 2.1, 0.1), np.arange(-0.4, 2.2, 0.1))))

    for s125_low, s125_high in s125_bins:
        if logs125 < s125_high and logs125 >= s125_low:
            break
        s125_high = None

    # See whether the pulse containers are present in the frame
    good_event = self.slcpulses in frame or self.hlcpulses in frame
    if not good_event:
        log_info(
            'Either %s or %s missing in frame. '
            'LLH Ratio not being calculated.'
            % (self.slcpulses, self.hlcpulses))

    # Store NaNs if the event falls outside the S125/Zen bins or the
    # pulse containers were not found
    if s125_high is None or zen_high is None or not good_event:
        llh_dict = {}
        for key in ['q_r', 'q_t', 't_r']:
            llh_dict['LLH_Hadron_%s' % key] = np.nan
            llh_dict['LLH_Gamma_%s' % key] = np.nan
        llh_dict['LLH_Ratio'] = np.nan
        frame.Put(self.objname, dataclasses.I3MapStringDouble(llh_dict))
        self.PushFrame(frame)
        return

    self.llh.events = {}
    event_doms = []
    axis = frame[self.track]
    rotation = to_shower_cs(axis)
    origin = np.array([[axis.pos.x], [axis.pos.y], [axis.pos.z]])

    # len(self.events['laputop_x']) is used for a loop later
    self.llh.events['laputop_x'] = [axis.pos.x]

    for pulsename, tag in [[self.slcpulses, 'slc'],
                           [self.hlcpulses, 'hlc']]:
        if pulsename not in frame:
            log_warn('%s not found in frame' % pulsename)
            continue

        self.llh.events[tag + '_rperp'] = []
        self.llh.events[tag + '_q'] = []
        self.llh.events[tag + '_t'] = []

        pulses = dataclasses.I3RecoPulseSeriesMap.from_frame(
            frame, pulsename)

        for k, m in pulses.items():
            event_doms.append((k[0], k[1], k[2]))
            for i, pulse in enumerate(m):
                if k not in self.geometry.omgeo:
                    log_fatal("OM {om} not in geometry!".format(om=k))
                    continue
                position = self.geometry.omgeo[k].position
                det_cs_position = np.array([[position.x],
                                            [position.y],
                                            [position.z]])
                shower_cs_position = rotation * (det_cs_position - origin)
                shower_cs_radius = np.sqrt(shower_cs_position[0]**2 +
                                           shower_cs_position[1]**2)
                time = pulse.time - float(
                    axis.time - shower_cs_position[2] / I3Constants.c)

                # Correct the SLC time stamp if an SLC time correction
                # pickle has been supplied
                if self.slc_time_corr is not None and \
                        pulsename == self.slcpulses:
                    newtime = correct_slc_time(self.mean_slc_charge,
                                               self.median_time_diff,
                                               time, pulse.charge)
                    time = newtime

                self.llh.events[tag + '_rperp'].append(
                    np.log10(float(shower_cs_radius)))
                self.llh.events[tag + '_q'].append(np.log10(pulse.charge))
                self.llh.events[tag + '_t'].append(np.log10(time))

    nohit_t, nohit_q, nohit_rperp, nohit_om, nohit_string = \
        self.llh.no_hit_doms(event_doms, axis.dir.azimuth,
                             axis.dir.zenith, axis.pos.x, axis.pos.y)
    self.llh.events['nohit_t'] = nohit_t
    self.llh.events['nohit_q'] = nohit_q
    self.llh.events['nohit_rperp'] = nohit_rperp
    self.llh.events['nohit_om'] = nohit_om
    self.llh.events['nohit_string'] = nohit_string

    # need to convert each list to a list of lists
    for key in self.llh.events.keys():
        self.llh.events[key] = [self.llh.events[key]]

    # print('s125_high = {}'.format(s125_high))
    # print('str(s125_high) = {}'.format(str(s125_high)))
    self.llh.calc_llh_values_new(histbins=histbins,
                                 histrange=histrange,
                                 s125=str(s125_high),
                                 zen=str(zen_high),
                                 ignorezerobinsinboth=True,
                                 generatepdf=False,
                                 pdffileinstance=None,
                                 trace_tanks=False)

    llh_dict = {}
    LLHRatio = 0
    for key in ['q_r', 'q_t', 't_r']:
        llh_dict['LLH_Hadron_%s' % key] = \
            self.llh.events['proton_like'][key][0]
        llh_dict['LLH_Gamma_%s' % key] = \
            self.llh.events['gamma_like'][key][0]
        LLHRatio += self.llh.events['proton_like'][key][0] - \
            self.llh.events['gamma_like'][key][0]
    llh_dict['LLH_Ratio'] = LLHRatio

    frame.Put(self.objname, dataclasses.I3MapStringDouble(llh_dict))

    # Event arrays need to be cleared out:
    del self.llh.events

    self.PushFrame(frame)
    return
def Physics(self, frame):
    mese_dict = {
        'n_files': self._n_files,
        'n_events_per_run': self._n_events_per_run,
    }

    # get MC info
    energy_true = frame['MCPrimary'].energy
    zenith_true = frame['MCPrimary'].dir.zenith
    azimuth_true = frame['MCPrimary'].dir.azimuth

    # -------
    # NuGen
    # -------
    if self._dataset_type in ['nugen', 'genie']:

        # get oneweight / n_gen
        oneweight = frame['I3MCWeightDict']['OneWeight'] / self._ngen
        true_type = frame['I3MCWeightDict']['PrimaryNeutrinoType']
        is_tau = (np.abs(true_type) == 16).all()

        # calculate astrophysical weights
        mese_dict['weight_E269'] = \
            2.09e-18 * oneweight * (energy_true / 1e5)**-2.69
        mese_dict['weight_E250'] = \
            2.23e-18 * oneweight * (energy_true / 1e5)**-2.5

        # calculate atmospheric weights
        if is_tau:
            mese_dict['weight_conv'] = oneweight * atmosphericFlux(
                neutrinoEnergy=energy_true,
                neutrinoZenith=zenith_true,
                neutrinoType=true_type,
                atmFluxConv=None,
                atmFluxPrompt=None,
            ) * 2. * self.conv_flux_multiplier
            mese_dict['weight_prompt'] = oneweight * atmosphericFlux(
                neutrinoEnergy=energy_true,
                neutrinoZenith=zenith_true,
                neutrinoType=true_type,
                atmFluxConv=None,
                atmFluxPrompt=None,
            ) * 2. * self.prompt_flux_multiplier
        else:
            mese_dict['weight_conv'] = oneweight * atmosphericFlux(
                neutrinoEnergy=energy_true,
                neutrinoZenith=zenith_true,
                neutrinoType=true_type,
                atmFluxConv=self.honda,
                atmFluxPrompt=None,
            ) * 2. * self.conv_flux_multiplier
            mese_dict['weight_prompt'] = oneweight * atmosphericFlux(
                neutrinoEnergy=energy_true,
                neutrinoZenith=zenith_true,
                neutrinoType=true_type,
                atmFluxConv=None,
                atmFluxPrompt=self.enberg,
            ) * 2. * self.prompt_flux_multiplier

        # ---------------------
        # Atmospheric Self Veto
        # ---------------------
        # get true_depth
        if 'IntersectionPoint' in frame:
            true_depth = frame['IntersectionPoint'].z
        else:
            muon = mu_utils.get_muon_of_inice_neutrino(frame)
            tau = tau_utils.get_tau_of_inice_neutrino(frame)

            if muon is not None:
                # found a muon
                entry = self._get_muon_entry(frame, muon)
                true_depth = entry.z
            elif tau is not None:
                # found a tau
                entry = self._get_particle_entry(tau)
                true_depth = entry.z
            else:
                # no muon or tau exists: cascade
                cascade = get_cascade_of_primary_nu(
                    frame, frame['MCPrimary'],
                    convex_hull=None, extend_boundary=800)[0]

                if cascade is not None:
                    true_depth = cascade.pos.z
                else:
                    cascade = get_cascade_of_primary_nu(
                        frame, frame['MCPrimary'], convex_hull=None,
                        extend_boundary=float('inf'))[0]

                    # Muon coming out of hadronic shower?
                    daughters = frame['I3MCTree'].get_daughters(cascade)

                    # collect possible muons from daughters of daughters
                    # e.g. Nu -> Nu + Hadrons -> Mu
                    muons = []
                    for d in daughters:
                        muons.extend([
                            m for m in frame['I3MCTree'].get_daughters(d)
                            if mu_utils.is_muon(m)
                        ])

                    if muons:
                        # pick highest energy muon
                        indices = np.argsort([m.energy for m in muons])
                        muon = muons[indices[-1]]
                        entry = self._get_muon_entry(frame, muon)
                        true_depth = entry.z
                    else:
                        true_depth = cascade.pos.z

        # apply self veto
        veto_args = (true_type, energy_true, np.cos(zenith_true),
                     1950. - true_depth)

        if 'IsHese' in frame:
            if frame['IsHese'].value:
                mese_dict['veto_conv'] = self.honda_veto_hese(*veto_args)
                mese_dict['veto_prompt'] = self.enberg_veto_hese(
                    *veto_args)
            else:
                mese_dict['veto_conv'] = self.honda_veto_mese(*veto_args)
                mese_dict['veto_prompt'] = self.enberg_veto_mese(
                    *veto_args)
        else:
            log_warn('WARNING: IsHese does not exist. Using MESE veto')
            mese_dict['veto_conv'] = self.honda_veto_mese(*veto_args)
            mese_dict['veto_prompt'] = self.enberg_veto_mese(*veto_args)

        mese_dict['weight_conv'] *= mese_dict['veto_conv']
        mese_dict['weight_prompt'] *= mese_dict['veto_prompt']
        # ---------------------

    # -------
    # MuonGun
    # -------
    elif self._dataset_type == 'muongun':
        if 'MuonWeight_GaisserH4a' in frame:
            # --- Where does magic number of 1.6 come from? MuonMultiplier
            mese_dict['muon_weight'] = \
                frame['MuonWeight_GaisserH4a'].value * 1.6 / self._ngen

    # -----------------
    # Experimental Data
    # -----------------
    elif self._dataset_type == 'data':
        mjd = frame['I3EventHeader'].start_time.mod_julian_day_double

    # -----------------------------------------------------
    # final track cut:
    # drop low energy downgoing tracks and duplicate events
    # -----------------------------------------------------
    try:
        # get TrackFit_zenith
        TrackFit_zenith = frame['TrackFit'].dir.zenith

        # get energy_millipede
        energy_millipede = frame['MillipedeDepositedEnergy'].value

        # mask events
        track_mask = data_dict['is_cascade_reco'] | \
            ~((np.cos(TrackFit_zenith) > 0.3) & (energy_millipede < 10e3))

        if self._dataset_type in ['muongun', 'nugen', 'genie']:
            uniq_mask = np.r_[True, np.diff(energy_true) != 0]
        else:
            uniq_mask = np.r_[True, np.diff(mjd) != 0]

        mese_dict['passed_final_track_cut'] = track_mask & uniq_mask
    except Exception as e:
        # log_warn(e)
        pass
    # -----------------------------------------------------

    for k, item in mese_dict.items():
        mese_dict[k] = float(item)

    frame[self._output_key] = dataclasses.I3MapStringDouble(mese_dict)
    self.PushFrame(frame)
def get_cascade_em_equivalent(mctree, cascade_primary):
    """Get the electromagnetic (EM) equivalent energy of a given cascade.

    Recursively walks through the daughters of a provided cascade primary
    and collects the EM equivalent energy.

    Note: muons and taus are added entirely as EM equivalent energy!
    This disregards the fact that a tau, for instance, can decay and the
    neutrino may carry away a large fraction of the energy.

    Parameters
    ----------
    mctree : I3MCTree
        The current I3MCTree.
    cascade_primary : I3Particle
        The cascade primary particle.

    Returns
    -------
    float
        The total EM equivalent energy of the given cascade.
    float
        The total EM equivalent energy of the EM cascade.
    float
        The total EM equivalent energy of the hadronic cascade.
    float
        The total EM equivalent energy in muons and taus (tracks).
    """
    daughters = mctree.get_daughters(cascade_primary)

    # ---------------------------------
    # stopping conditions for recursion
    # ---------------------------------
    if (cascade_primary.location_type !=
            dataclasses.I3Particle.LocationType.InIce):
        # skip particles that are way outside of the detector volume
        return 0., 0., 0., 0.

    # check if we have a muon or tau
    if cascade_primary.type in [
            dataclasses.I3Particle.ParticleType.MuMinus,
            dataclasses.I3Particle.ParticleType.MuPlus,
            dataclasses.I3Particle.ParticleType.TauMinus,
            dataclasses.I3Particle.ParticleType.TauPlus,
            ]:
        # For simplicity we will assume that all energy is deposited.
        # Note: this is wrong, for instance, for taus that decay, where the
        # neutrino will carry away a large fraction of the energy.
        return cascade_primary.energy, 0., 0., cascade_primary.energy

    if len(daughters) == 0:
        if cascade_primary.is_neutrino:
            # skip neutrino: the energy is not visible
            return 0., 0., 0., 0.
        else:
            # get EM equivalent energy
            energy = convert_to_em_equivalent(cascade_primary)

            # EM energy
            if cascade_primary.type in EMTypes:
                return energy, energy, 0., 0.

            # Hadronic energy
            elif cascade_primary.type in HadronTypes:
                return energy, 0., energy, 0.

            else:
                log_warn('Unknown particle type: {}. '
                         'Assuming hadron!'.format(cascade_primary.type))
                return energy, 0., energy, 0.
    # ---------------------------------

    # collect energy from hadronic, EM, and track components
    energy_total = 0.
    energy_em = 0.
    energy_hadron = 0.
    energy_track = 0.

    # recursively walk through daughters and accumulate energy
    for daughter in daughters:

        # get energy depositions of the particle and its daughters
        e_total, e_em, e_hadron, e_track = get_cascade_em_equivalent(
            mctree, daughter)

        # CMC splits up hadronic cascades into segments of electrons.
        # In other words: if the cascade primary is a hadron, the daughter
        # particles need to contribute to the hadronic component of the shower
        if cascade_primary.type in HadronTypes:
            e_hadron += e_em
            e_em = 0

        # accumulate energies
        energy_total += e_total
        energy_em += e_em
        energy_hadron += e_hadron
        energy_track += e_track

    return energy_total, energy_em, energy_hadron, energy_track
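# Hedged usage sketch (an assumption, not part of the original module): how
# the function above might be called for the first primary of a frame's
# I3MCTree. The frame key 'I3MCTree' and the helper name are assumptions.
def em_equivalent_of_first_primary(frame, mctree_name='I3MCTree'):
    """Return (total, em, hadron, track) EM-equivalent energies for the first primary."""
    mctree = frame[mctree_name]
    primary = mctree.get_primaries()[0]
    return get_cascade_em_equivalent(mctree, primary)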
def Physics(self, frame):

    # get muon
    muon = mu_utils.get_muon(
        frame=frame,
        primary=frame[self._primary_key],
        convex_hull=self._convex_hull,
    )

    labels = dataclasses.I3MapStringDouble()

    if self._write_vector:
        binned_energy_losses, bin_center_pos = \
            mu_utils.get_binned_energy_losses_in_cube(
                frame=frame,
                muon=muon,
                bin_width=self._bin_width,
                boundary=self._boundary,
                return_bin_centers=self._write_vector,
            )
    else:
        binned_energy_losses = mu_utils.get_binned_energy_losses_in_cube(
            frame=frame,
            muon=muon,
            bin_width=self._bin_width,
            boundary=self._boundary,
            return_bin_centers=self._write_vector,
        )

    # write to frame
    labels['track_anchor_x'] = muon.pos.x
    labels['track_anchor_y'] = muon.pos.y
    labels['track_anchor_z'] = muon.pos.z
    labels['track_anchor_time'] = muon.time
    labels['azimuth'] = muon.dir.azimuth
    labels['zenith'] = muon.dir.zenith

    for i, energy_i in enumerate(binned_energy_losses):

        # stop adding energy losses if we reached the maximum
        if self._max_num_bins is not None:
            if i >= self._max_num_bins:
                msg = 'MaxNumBins is set to {}. '.format(self._max_num_bins)
                msg += 'Cutting off an additional {} losses!'.format(
                    len(binned_energy_losses) - self._max_num_bins)
                log_warn(msg)
                break

        labels['EnergyLoss_{:05d}'.format(i)] = energy_i

    # pad rest with NaNs
    if self._max_num_bins is not None:
        for i in range(len(binned_energy_losses), self._max_num_bins):
            labels['EnergyLoss_{:05d}'.format(i)] = float('NaN')

    frame.Put(self._output_key, labels)

    if self._write_vector:
        part_vec = dataclasses.I3VectorI3Particle()
        for energy_i, pos_i in zip(binned_energy_losses, bin_center_pos):
            part = dataclasses.I3Particle()
            part.pos = dataclasses.I3Position(*pos_i)
            part.energy = energy_i
            part.dir = dataclasses.I3Direction(muon.dir)
            part.time = ((muon.pos - part.pos).magnitude /
                         dataclasses.I3Constants.c)
            part_vec.append(part)
        frame.Put(self._output_key + 'ParticleVector', part_vec)

    self.PushFrame(frame)
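# Hedged sketch (an assumption, not part of the original module): reading the
# padded 'EnergyLoss_00000', 'EnergyLoss_00001', ... labels written above back
# into a fixed-length numpy array. `output_key` is whatever was configured as
# the module's output key; the helper name below is hypothetical.
import numpy as np


def read_energy_loss_vector(frame, output_key, max_num_bins):
    """Collect the EnergyLoss_{:05d} entries into an array of length max_num_bins."""
    labels = frame[output_key]
    return np.array([labels['EnergyLoss_{:05d}'.format(i)]
                     for i in range(max_num_bins)])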
def __call__(self, bias_data):
    """Apply Bias Function

    Parameters
    ----------
    bias_data : dict
        Dictionary of bias input data.
        Contents may include:
        {
            'frame': the current I3Frame,
        }

    Returns
    -------
    float
        Keep probability: probability with which this event should be kept.
    dict
        Dictionary with additional bias information.
    """
    frame = bias_data['frame']

    # get primary
    mc_tree = frame[self.mctree_name]
    primaries = mc_tree.get_primaries()
    assert len(primaries) == 1, 'Expected only 1 Primary!'

    # get muon
    muon = mu_utils.get_muon(
        frame, primaries[0], detector.icecube_hull,
        mctree_name=self.mctree_name,
    )

    if muon is None:
        # if the muon did not hit the convex hull, or if no muon exists,
        # it will be None. In this case we set default values
        found_muon = False
        cos_zen = np.cos(primaries[0].dir.zenith)
        track_length = 0.
        max_rel_loss = 0.
    else:
        found_muon = True
        cos_zen = np.cos(muon.dir.zenith)
        track_length = mu_utils.get_muon_track_length_inside(
            muon, detector.icecube_hull)

        # get muon energy losses
        losses = [
            loss for loss in mc_tree.get_daughters(muon)
            if not mu_utils.is_muon(loss) and
            geometry.is_in_detector_bounds(loss.pos, extend_boundary=60)
        ]

        # compute relative energy losses
        rel_losses = []
        loss_energies = []
        for loss in losses:

            # get energy of muon prior to energy loss
            distance = (muon.pos - loss.pos).magnitude
            energy = mu_utils.get_muon_energy_at_distance(
                frame, muon, np.clip(distance - 1, 0., float('inf')))

            # If the loss is at the muon decay point, the returned energy
            # might be NaN; assert this and set a default value of 1 GeV
            if not np.isfinite(energy):
                assert np.abs(distance - muon.length) < 1, (energy, muon)
                energy = 1

            rel_loss = loss.energy / energy
            if rel_loss > 1. or rel_loss < 0.:
                msg = 'Found out of bounds rel_loss: {:3.3f}. '.format(
                    rel_loss)
                msg += 'Clipping value to [0, 1]'
                log_warn(msg)
                rel_loss = np.clip(rel_loss, 0., 1.)

            loss_energies.append(loss.energy)
            rel_losses.append(rel_loss)

        if rel_losses:
            max_rel_loss = rel_losses[np.argmax(loss_energies)]
        else:
            max_rel_loss = 0.

    # bias based on zenith
    if self.cos_zenith_sigmoid_scale is None:
        zenith_keep_prob = 1.0
    else:
        zenith_keep_prob = self.sigmoid(
            -cos_zen,
            s=self.cos_zenith_sigmoid_scale,
            b=self.cos_zenith_sigmoid_bias,
        )

    # bias based on in-detector track length
    if self.track_length_sigmoid_scale is None:
        track_length_prob = 1.0
    else:
        track_length_prob = self.sigmoid(
            track_length,
            s=self.track_length_sigmoid_scale,
            b=self.track_length_sigmoid_bias,
        )

    # bias based on maximum relative energy loss
    if self.muon_loss_sigmoid_scale is None:
        max_rel_loss_prob = 1.
    else:
        max_rel_loss_prob = self.sigmoid(
            max_rel_loss,
            s=self.muon_loss_sigmoid_scale,
            b=self.muon_loss_sigmoid_bias,
        )

    bias_info = {
        'found_muon': found_muon,
        'cos_zenith': cos_zen,
        'track_length_in_detector': track_length,
        'max_relative_energy_loss': max_rel_loss,
    }

    keep_prob = zenith_keep_prob * track_length_prob * max_rel_loss_prob
    return keep_prob, bias_info
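# Hedged sketch (an assumption, not part of the original class): a minimal
# form of the `sigmoid(x, s=..., b=...)` helper used above, with scale `s`
# and bias/offset `b`. The actual definition lives elsewhere in the class
# and may differ in sign conventions or parameterization.
import numpy as np


def sigmoid(x, s=1., b=0.):
    """Assumed logistic function centered at b with steepness s."""
    return 1. / (1. + np.exp(-s * (x - b)))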
def _create_in_array(self, frame, time_transformation=signed_log):
    """Create the IceTop-specific input array that goes into
    GeneratePDF / CalcLLHR.

    This method will need to be adapted for each analysis.
    """
    if not self.Use_Laputop:
        En = np.log10(frame[self.EnergyRecoName].energy)
    else:
        En = frame[self.LaputopParamsName].value(
            recclasses.LaputopParameter.Log10_S125)
    ze = np.cos(frame[self.AngularRecoName].dir.zenith)

    hits = frame[self.HitsName]
    unhits = frame[self.UnhitsName]
    excluded = frame[self.ExcludedName]

    hits_t = time_transformation(
        np.array([hit.time_residual for hit in hits]))
    hits_q = np.log10(np.array([hit.charge for hit in hits]))
    hits_qunlog = np.array([hit.charge for hit in hits])
    hits_r = log_plus_one(np.array([hit.distance for hit in hits]))
    hits_E = np.ones_like(hits_r) * En
    hits_z = np.ones_like(hits_r) * ze
    hits_doms = np.array([hit.DOMkey for hit in hits])

    if np.isnan(hits_q).any():
        select = np.isnan(hits_q)
        log_error('nan q doms in %s' % self.HitsName)
        log_error(
            list(zip(hits_q[select], hits_qunlog[select],
                     hits_doms[select])))
    if np.isnan(hits_t).any():
        select = np.isnan(hits_t)
        log_error('nan t doms in %s' % self.HitsName)
        log_error(list(zip(hits_t[select], hits_doms[select])))

    unhits_t = time_transformation(
        np.array([hit.time_residual for hit in unhits]))
    unhits_q = np.log10(np.array([hit.charge for hit in unhits]))
    unhits_r = log_plus_one(np.array([hit.distance for hit in unhits]))
    unhits_E = np.ones_like(unhits_r) * En
    unhits_z = np.ones_like(unhits_r) * ze

    excluded_t = time_transformation(
        np.array([hit.time_residual for hit in excluded]))
    excluded_q = np.log10(np.array([hit.charge for hit in excluded]))
    excluded_r = log_plus_one(
        np.array([hit.distance for hit in excluded]))
    excluded_E = np.ones_like(excluded_r) * En
    excluded_z = np.ones_like(excluded_r) * ze

    # ready data for entry to 5D hist
    t = np.concatenate((hits_t, unhits_t, excluded_t))
    q = np.concatenate((hits_q, unhits_q, excluded_q))
    r = np.concatenate((hits_r, unhits_r, excluded_r))
    E = np.concatenate((hits_E, unhits_E, excluded_E))
    z = np.concatenate((hits_z, unhits_z, excluded_z))

    if len(t) != 162 or len(q) != 162 or len(r) != 162:
        log_error('N_t %s N_q %s N_r %s' % (len(t), len(q), len(r)))
        log_fatal('Total Tanks in Event not 162')
    if np.isnan(t).any() or np.isnan(q).any() or np.isnan(r).any():
        # print('t', t)
        # print('q', q)
        # print('r', r)
        log_warn(
            'signed_time/logq/logr have nans, logs125 %.2f coszen %.2f'
            % (En, ze))

    in_array = np.vstack([E, z, q, t, r]).T
    return in_array