def calculate_chi_square(event_info, zenith, azimuth, energy):
    target_direction = hp.spherical_to_cartesian(zenith, azimuth)
    event_direction = hp.spherical_to_cartesian(float(event_info[2]), float(event_info[3]))
    direction_error = hp.get_angle(target_direction, event_direction)
    energy_error = np.log10(energy) - np.log10(float(event_info[1]))
    return (direction_error / sigma_angle)**2 + (energy_error / sigma_log_energy)**2
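# ---------------------------------------------------------------------------
# Self-contained sketch (made-up numbers, not part of the function above) of the
# two chi-square terms: a space-angle term and a log10-energy term, each
# normalized by an assumed resolution. The helper and the sigma values below are
# placeholders standing in for hp.get_angle and the module-level sigmas.
import numpy as np

def _angle_between(v1, v2):
    # angle between two unit vectors, same role as hp.get_angle
    return np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))

sigma_angle_example = np.deg2rad(5.0)      # assumed 5 degree angular resolution
sigma_log_energy_example = 0.3             # assumed 0.3 resolution in log10(E)

direction_error = _angle_between(np.array([0., 0., 1.]),
                                 np.array([0., np.sin(0.05), np.cos(0.05)]))
energy_error = np.log10(8e17) - np.log10(1e18)
chi2 = (direction_error / sigma_angle_example)**2 + (energy_error / sigma_log_energy_example)**2
# ---------------------------------------------------------------------------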
def get_pred_angle_diff_data(true_directions_cartesian, predicted_directions_cartesian):
    # Only pick first 100000 data
    # N = 100000
    # nu_direction_predict = nu_direction_predict[:N]
    # nu_direction = nu_direction[:N]
    angle_difference_data = np.array([
        hp.get_angle(predicted_directions_cartesian[i], true_directions_cartesian[i])
        for i in range(len(true_directions_cartesian))
    ]) * 180 / np.pi
    return angle_difference_data
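# ---------------------------------------------------------------------------
# Self-contained sketch (toy unit vectors) of the space-angle difference that
# get_pred_angle_diff_data returns, written with plain numpy instead of
# hp.get_angle. The vectors below are made-up placeholders.
import numpy as np

true_dirs = np.array([[0., 0., 1.], [1., 0., 0.]])
pred_dirs = np.array([[0., np.sin(0.02), np.cos(0.02)], [np.cos(0.1), np.sin(0.1), 0.]])

cosines = np.clip(np.sum(true_dirs * pred_dirs, axis=1), -1.0, 1.0)   # dot products of unit vectors
angle_diff_deg = np.degrees(np.arccos(cosines))                       # ~[1.15, 5.73] degrees
# ---------------------------------------------------------------------------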
def run(self, evt, station, det, channels_to_use=None, cosmic_ray=False):
    """
    Fits the direction using templates

    Parameters
    ----------
    evt: event
    station: station
    det: detector
    channels_to_use: list (default: [0, 1, 2, 3])
        antennas to use for the fit
    cosmic_ray: bool
        type to set correlation template
    """
    if channels_to_use is None:
        channels_to_use = [0, 1, 2, 3]
    if cosmic_ray:
        type_str = 'cr'
        xcorrelations = chp.cr_xcorrelations
    else:
        type_str = 'nu'
        xcorrelations = chp.nu_xcorrelations

    station_id = station.get_id()
    channels = station.iter_channels(channels_to_use)

    times = []
    positions = []
    for iCh, channel in enumerate(channels):
        channel_id = channel.get_id()
        times.append(channel[xcorrelations]['{}_ref_xcorr_time'.format(type_str)] + channel.get_trace_start_time())
        positions.append(det.get_relative_position(station_id, channel_id))
    times = np.array(times)
    positions = np.array(positions)
    site = det.get_site(station_id)
    n_ice = ice.get_refractive_index(-0.01, site)

    from scipy import optimize as opt

    def obj_plane(params, positions, t_measured):
        zenith, azimuth = params
        if cosmic_ray:
            if (zenith < 0) or (zenith > 0.5 * np.pi):
                return np.inf
        else:
            if (zenith < 0.5 * np.pi) or (zenith > np.pi):
                return np.inf
        v = hp.spherical_to_cartesian(zenith, azimuth)
        c = constants.c * units.m / units.s
        if not cosmic_ray:
            c = c / n_ice
            logger.debug("using speed of light = {:.4g}".format(c))
        t_expected = -(np.dot(v, positions.T) / c)
        sigma = 1 * units.ns
        chi2 = np.sum(((t_expected - t_expected.mean()) - (t_measured - t_measured.mean()))**2 / sigma**2)
        logger.debug("texp = {texp}, tm = {tmeas}, {chi2}".format(texp=t_expected, tmeas=t_measured, chi2=chi2))
        return chi2

    method = "Nelder-Mead"
    options = {'maxiter': 1000, 'disp': False}
    zenith_start = 135 * units.deg
    if cosmic_ray:
        zenith_start = 45 * units.deg
    starting_chi2 = {}
    for starting_az in np.array([0, 90, 180, 270]) * units.degree:
        starting_chi2[starting_az] = obj_plane((zenith_start, starting_az), positions, times)
    azimuth_start = min(starting_chi2, key=starting_chi2.get)
    res = opt.minimize(obj_plane, x0=[zenith_start, azimuth_start], args=(positions, times),
                       method=method, options=options)

    output_str = "reconstructed angles theta = {:.1f}, phi = {:.1f}".format(
        res.x[0] / units.deg, hp.get_normalized_angle(res.x[1]) / units.deg)
    if station.has_sim_station():
        sim_zen = station.get_sim_station()[stnp.zenith]
        sim_az = station.get_sim_station()[stnp.azimuth]
        dOmega = hp.get_angle(hp.spherical_to_cartesian(sim_zen, sim_az),
                              hp.spherical_to_cartesian(res.x[0], res.x[1]))
        output_str += " MC theta = {:.1f}, phi = {:.1f}, dOmega = {:.2f}".format(
            sim_zen / units.deg, sim_az / units.deg, dOmega / units.deg)
    logger.info(output_str)

    station[stnp.zenith] = res.x[0]
    station[stnp.azimuth] = hp.get_normalized_angle(res.x[1])
    if cosmic_ray:
        station[stnp.cr_zenith] = res.x[0]
        station[stnp.cr_azimuth] = hp.get_normalized_angle(res.x[1])
    else:
        station[stnp.nu_zenith] = res.x[0]
        station[stnp.nu_azimuth] = hp.get_normalized_angle(res.x[1])
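# ---------------------------------------------------------------------------
# Self-contained toy check (made-up antenna layout, placeholder units) of the
# plane-wave timing chi-square minimized in obj_plane above: expected arrival
# times are t_i = -(v . x_i) / c, and only mean-subtracted time differences
# enter the sum, so the objective vanishes at the true direction.
import numpy as np

def plane_wave_chi2(zenith, azimuth, positions, t_measured, c=0.3, sigma=1.0):
    # c in m/ns and sigma in ns are placeholder values
    v = np.array([np.sin(zenith) * np.cos(azimuth),
                  np.sin(zenith) * np.sin(azimuth),
                  np.cos(zenith)])
    t_expected = -positions.dot(v) / c
    return np.sum(((t_expected - t_expected.mean()) - (t_measured - t_measured.mean()))**2 / sigma**2)

positions = np.array([[0., 0., 0.], [6., 0., 0.], [0., 6., 0.], [6., 6., -1.]])  # toy layout in m
zen_true, az_true = np.deg2rad(135.), np.deg2rad(60.)
v_true = np.array([np.sin(zen_true) * np.cos(az_true),
                   np.sin(zen_true) * np.sin(az_true),
                   np.cos(zen_true)])
t_measured = -positions.dot(v_true) / 0.3      # noise-free "measured" times in ns

assert plane_wave_chi2(zen_true, az_true, positions, t_measured) < 1e-12   # zero at the truth
assert plane_wave_chi2(zen_true + 0.1, az_true, positions, t_measured) > 0  # grows away from it
# ---------------------------------------------------------------------------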
plt.hist(zeniths, bins=np.arange(0, 181, 5))
plt.xlabel('zenith angle [deg]')
plt.ylabel('unweighted entries')
plt.figtext(1.0, 0.2, "N: " + str(len(zeniths))
            + "\nmean: " + str(np.average(zeniths))
            + "\nstd: " + str(np.std(zeniths)))
plt.suptitle("neutrino direction")
plt.savefig(os.path.join(plot_folder, 'neutrino_direction.pdf'), bbox_inches="tight")
plt.clf()

# plot difference between Cherenkov angle and viewing angle
# the zenith/azimuth give the direction the shower comes from, i.e., opposite to the direction of
# propagation. We need the propagation direction here, so we multiply the shower axis with '-1'
shower_axis = -1.0 * hp.spherical_to_cartesian(theta, phi)
viewing_angles_d = np.array([hp.get_angle(x, y) for x, y in zip(shower_axis, launch_vectors[:, 0, 0])])
viewing_angles_r = np.array([hp.get_angle(x, y) for x, y in zip(shower_axis, launch_vectors[:, 0, 1])])

# calculate the correct Cherenkov angle for the ice density at the vertex position
ice = medium.southpole_simple()
n_indexs = np.array([ice.get_index_of_refraction(x) for x in np.array([xx, yy, zz]).T])
rho = np.arccos(1. / n_indexs)

weightsExt = weights
for chan in range(1, len(launch_vectors[0])):
    viewing_angles_d = np.append(
        viewing_angles_d,
        np.array([hp.get_angle(x, y) for x, y in zip(shower_axis, launch_vectors[:, chan, 0])]))
def get_test_emission(self, frame):
    propagator = propagation.get_propagation_module('analytic')
    ice = medium.get_ice_model('ARAsim_southpole')

    # work up a simple example to test functionality
    ant = np.array([0, 0, -100])
    vertex = np.array([-2543.18, 2319.96, -1828.74])
    azi = 2.4658
    zen = 1.0863
    inelast = 0.55
    inttype = 'cc'
    flavor = 14
    energy = 9.20e+18

    n_index = ice.get_index_of_refraction(vertex)
    cherenkov_angle = np.arccos(1. / n_index)
    shower_axis = -1 * hp.spherical_to_cartesian(zen, azi)

    r = propagator(vertex, ant,
                   medium=ice,
                   attenuation_model='SP1',
                   n_frequencies_integration=25,
                   n_reflections=0)
    r.find_solutions()
    num_solutions = r.get_number_of_solutions()
    viewing_angles = []
    distances = []
    launch_vectors = []
    receive_vectors = []
    # print('num solutions is {}'.format(num_solutions))
    for iS in range(num_solutions):
        launch_vectors.append(r.get_launch_vector(iS))
        receive_vectors.append(r.get_receive_vector(iS))
        # launch_vector = r.get_launch_vector(iS)
        viewing_angles.append(hp.get_angle(shower_axis, launch_vectors[iS]))
        distances.append(r.get_path_length(iS))
        # viewing_angle = hp.get_angle(shower_axis, launch_vector)

    # fem, fhad = helper._get_em_had_fraction(inelast, inttype, flavor)
    fem = 0
    fhad = 0.55
    signal = askaryan.get_time_trace(
        energy=energy * fhad,
        theta=viewing_angles[0],
        N=self._n_samples,
        dt=self._dt,
        shower_type='HAD',
        n_index=n_index,
        R=distances[0],
        model=self._askaryan_model,
        seed=self._seed
    )

    polarization_direction_onsky = util_geo.calculate_polarization_vector(launch_vectors[0], shower_axis)
    icetray.logging.log_debug("Polarization direction on sky {}".format(polarization_direction_onsky))
    this_eR, this_eTheta, this_ePhi = np.outer(polarization_direction_onsky, signal)

    # create traces for the eR, eTheta, and ePhi components
    eR = icetradio.I3Trace()
    eTheta = icetradio.I3Trace()
    ePhi = icetradio.I3Trace()

    eR.trace = this_eR
    eR.traceStartTime = 0
    eR.samplingRate = self._sampling_rate

    eTheta.trace = this_eTheta
    eTheta.traceStartTime = 0
    eTheta.samplingRate = self._sampling_rate

    ePhi.trace = this_ePhi
    ePhi.traceStartTime = 0
    ePhi.samplingRate = self._sampling_rate

    # put those traces inside an EField
    field = icetradio.I3EField()
    field.eR = eR
    field.eTheta = eTheta
    field.ePhi = ePhi
    frame.Put("DummyEField", field)
for i in range(flav.size):
    if flav[i]:
        launch_vectors = np.append(launch_vectors, [lvtemp[i]], axis=0)
        zenith_inp = np.append(zenith_inp, [zeniths[i]], axis=0)
        azimuth_inp = np.append(azimuth_inp, [azimuths[i]], axis=0)
        xx = np.append(xx, xtemp[i])
        yy = np.append(yy, ytemp[i])
        zz = np.append(zz, ztemp[i])

launch_vectors = np.delete(launch_vectors, 0, axis=0)
zenith_inp = np.delete(zenith_inp, 0, axis=0)
azimuth_inp = np.delete(azimuth_inp, 0, axis=0)

shower_axis = -1 * hp.spherical_to_cartesian(zenith_inp, azimuth_inp)
viewing_angles = np.array([hp.get_angle(x, y) for x, y in zip(shower_axis, launch_vectors[:, 0, 0])])

# calculate the correct Cherenkov angle for the ice density at the vertex position
ice = medium.southpole_simple()
n_indexs = np.array([ice.get_index_of_refraction(x) for x in np.array([xx, yy, zz]).T])
rho = np.arccos(1. / n_indexs)

mask = ~np.isnan(viewing_angles)
fig, ax = php.get_histogram((viewing_angles[mask] - rho[mask]) / units.deg,
                            weights=weightstemp[mask],
                            bins=np.arange(-30, 30, 1),
                            xlabel='viewing - cherenkov angle [deg] - ' + title,
                            figsize=(6, 6))
fig.savefig(os.path.join(plot_folder, title + '_{}_dCherenkov.pdf'.format(key)))

###########################
# plot flavor ratios
###########################
path2 = r2.get_path(0)
# ax.plot3D(path1.T[0], path1.T[1], path1.T[2], label='path 1')
ax.plot3D(path2.T[0], path2.T[1], path2.T[2], label='path {}'.format(j))
ax.plot3D([vertex[0], vertex[0] + 500 * v[0]],
          [vertex[1], vertex[1] + 500 * v[1]],
          [vertex[2], vertex[2] + 500 * v[2]],
          '--', label='shower direction')

dT = []
for l in l2:
    # print("{:.1f}".format((theta - hp.get_angle(-v, l)) / units.deg))
    dT.append((theta - hp.get_angle(-v, l)) / units.deg)
dTs.append(np.min(np.abs(np.array(dT))))

if plot:
    R3 = hp.get_rotation(np.array([0, 0, 1]), -v)
    for phi in np.linspace(0, 2 * np.pi, 200):
        l = hp.spherical_to_cartesian(theta, phi)
        # zen, az = hp.cartesian_to_spherical(v[0], v[1], v[2])
        R1 = np.array([[np.cos(az), -np.sin(az), 0],
                       [np.sin(az), np.cos(az), 0],
                       [0, 0, 1]])
        R2 = np.array([[np.cos(zen), 0, -np.sin(zen)],
                       [0, 1, 0],
                       [np.sin(zen), 0, np.cos(zen)]])
        # l2 = np.matmul(R1, np.matmul(R2, l))
        l2 = np.matmul(R3, l)
def generate_signal(deposited_energy, shower_axis, em_or_had, launch_vector, distance,
                    arrival_time, n_index, attenuation_values, dt, n_samples, model, seed,
                    keep_unattenuated_fields=False):
    """
    A function to generate Askaryan fields at the antennas

    Get the Askaryan signals/fields at the antenna. This means that the fields returned by
    this function already include the polarization factors, the 1/R, and the attenuation
    due to the ice.

    Parameters
    ----------
    deposited_energy: double or float
        energy deposited in the shower in eV
    shower_axis: I3Position
        the shower axis
    em_or_had: string
        the shower type ('EM' or 'HAD') passed to the Askaryan model
    launch_vector: I3Position
        the launch vector of the ray that makes the signal
    distance: float
        the path length traveled by the signal in m (including ray bending!)
    arrival_time: float
        the time the field arrives at the antenna, in seconds (including ray bending!)
    n_index: float
        the index of refraction at the vertex
    attenuation_values: complex np array
        the complex frequency-dependent attenuation factors
    dt: float
        the time between samples for the Askaryan emission, in seconds
    n_samples: int
        the number of samples to have in the Askaryan emission
    model: string
        what Askaryan model should be used to generate the emission
        options are described in NuRadioMC.SignalGen.askaryan
        https://github.com/nu-radio/NuRadioMC/blob/master/NuRadioMC/SignalGen/askaryan.py
    seed: int
        what random number seed should be used in generating the Askaryan emission
    keep_unattenuated_fields: bool
        whether or not to keep a copy of the E-fields that does not have attenuation factors applied
        default is False, to reduce file output sizes

    Returns
    -------
    signal: I3RadioSignal
        the radio signal container for this event
    """
    local_launch_vector = util_dataclasses.i3pos_to_np(launch_vector)
    local_shower_axis = util_dataclasses.i3pos_to_np(shower_axis)

    viewing_angle = hp.get_angle(local_shower_axis, local_launch_vector)

    signal = askaryan.get_time_trace(
        energy=deposited_energy,
        theta=viewing_angle,
        N=n_samples,
        dt=dt,
        shower_type=em_or_had,
        n_index=n_index,
        R=distance,
        model=model,
        seed=seed
    )

    signal_spectrum = fft.time2freq(signal, 1. / dt)
    attenuated_signal_spectrum = signal_spectrum * attenuation_values
    attenuated_signal = fft.freq2time(attenuated_signal_spectrum, 1. / dt)

    # calculate the polarization
    polarization_direction_onsky = util_geo.calculate_polarization_vector(local_launch_vector, local_shower_axis)
    icetray.logging.log_debug("Polarization direction on sky {}".format(polarization_direction_onsky))

    # create the e-fields at the antenna
    this_eR_attenuated, this_eTheta_attenuated, this_ePhi_attenuated = np.outer(polarization_direction_onsky, attenuated_signal)

    # store the eR, eTheta, ePhi components in traces for the attenuated field
    sampling_rate = 1. / dt
    eR_attenuated = util_dataclasses.fill_I3Trace(this_eR_attenuated, arrival_time, sampling_rate)
    eTheta_attenuated = util_dataclasses.fill_I3Trace(this_eTheta_attenuated, arrival_time, sampling_rate)
    ePhi_attenuated = util_dataclasses.fill_I3Trace(this_ePhi_attenuated, arrival_time, sampling_rate)

    # put those traces into a field
    field_watt = util_dataclasses.fill_I3EField(eR_attenuated, eTheta_attenuated, ePhi_attenuated)

    # and finally, create and return a signal object
    radio_signal = icetradio.I3RadioSignal()
    radio_signal.view_angle = viewing_angle * icetray.I3Units.rad
    radio_signal.polarization_vector = util_dataclasses.np_to_i3pos(polarization_direction_onsky, 'sph')
    radio_signal.field_watt = field_watt

    if keep_unattenuated_fields:
        # make a copy of the fields that doesn't include the attenuation factor
        # we generally do *not* save this information, as a space saving measure
        this_eR, this_eTheta, this_ePhi = np.outer(polarization_direction_onsky, signal)
        eR = util_dataclasses.fill_I3Trace(this_eR, arrival_time, sampling_rate)
        eTheta = util_dataclasses.fill_I3Trace(this_eTheta, arrival_time, sampling_rate)
        ePhi = util_dataclasses.fill_I3Trace(this_ePhi, arrival_time, sampling_rate)
        field_noatt = util_dataclasses.fill_I3EField(eR, eTheta, ePhi)
        radio_signal.field_noatt = field_noatt

    return radio_signal
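# ---------------------------------------------------------------------------
# Minimal sketch (not part of the module above) of the frequency-domain
# attenuation step used in generate_signal, written with plain numpy FFTs
# instead of the NuRadioReco fft helpers. The trace, sampling interval and
# attenuation curve below are made-up placeholder values.
import numpy as np

dt = 0.5e-9                                   # sampling interval in seconds (placeholder)
trace = np.zeros(256)
trace[128] = 1.0                              # toy impulse standing in for the Askaryan trace
freqs = np.fft.rfftfreq(trace.size, d=dt)     # frequencies of the real-FFT bins
attenuation = np.exp(-freqs / 1e9)            # made-up frequency-dependent attenuation factors

spectrum = np.fft.rfft(trace)                 # time -> frequency
attenuated_trace = np.fft.irfft(spectrum * attenuation, n=trace.size)  # apply factors, back to time
# ---------------------------------------------------------------------------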
ax.legend()
ax.set_ylim(maxy)
fig.tight_layout()
fig.savefig(os.path.join(plot_folder, '{}_polarization_unweighted.png'.format(key)))

###########################
# plot viewing angle
###########################
shower_axis = -1 * hp.spherical_to_cartesian(np.array(fin['zeniths'])[triggered],
                                             np.array(fin['azimuths'])[triggered])
launch_vectors = np.array(station['launch_vectors'])[triggered]
viewing_angles = np.array([hp.get_angle(x, y) for x, y in zip(shower_axis, launch_vectors[:, 0, 0])])

# calculate the correct Cherenkov angle for the ice density at the vertex position
ice = medium.southpole_simple()
n_indexs = np.array([
    ice.get_index_of_refraction(x)
    for x in np.array([np.array(fin['xx'])[triggered],
                       np.array(fin['yy'])[triggered],
                       np.array(fin['zz'])[triggered]]).T
])
rho = np.arccos(1. / n_indexs)
mask = ~np.isnan(viewing_angles)
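# ---------------------------------------------------------------------------
# Side note (a self-contained sketch, not part of the script above): the
# Cherenkov angle used for the comparison follows from cos(theta_C) = 1/n.
# The index of refraction below is a placeholder value typical of deep ice.
import numpy as np

n_deep_ice = 1.78                         # placeholder index of refraction
theta_c = np.arccos(1. / n_deep_ice)      # Cherenkov angle in radians
print(np.rad2deg(theta_c))                # roughly 56 degrees for this value of n
# ---------------------------------------------------------------------------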
def run(self, evt, station, det, n_index=None, ZenLim=None, AziLim=None,
        channel_pairs=((0, 2), (1, 3)), use_envelope=False):
    """
    reconstruct signal arrival direction for all events

    Parameters
    ----------
    evt: Event
        The event to run the module on
    station: Station
        The station to run the module on
    det: Detector
        The detector description
    n_index: float
        the index of refraction
    ZenLim: 2-dim array/list of floats (default: [0 * units.deg, 90 * units.deg])
        the zenith angle limits for the fit
    AziLim: 2-dim array/list of floats (default: [0 * units.deg, 360 * units.deg])
        the azimuth angle limits for the fit
    channel_pairs: pair of pair of integers
        specify the two channel pairs to use, default ((0, 2), (1, 3))
    use_envelope: bool (default False)
        if True, the Hilbert envelope of the traces is used
    """
    if ZenLim is None:
        ZenLim = [0 * units.deg, 90 * units.deg]
    if AziLim is None:
        AziLim = [0 * units.deg, 360 * units.deg]
    use_correlation = True

    def ll_regular_station(angles, corr_02, corr_13, sampling_rate, positions, trace_start_times):
        """
        Likelihood function for a four antenna ARIANNA station, using correlation.

        Using correlation, has no built-in wrap around, pulse needs to be in the middle
        """
        zenith = angles[0]
        azimuth = angles[1]
        times = []

        for pos in positions:
            tmp = [geo_utl.get_time_delay_from_direction(zenith, azimuth, pos[0], n=n_index),
                   geo_utl.get_time_delay_from_direction(zenith, azimuth, pos[1], n=n_index)]
            times.append(tmp)

        delta_t_02 = times[0][1] - times[0][0]
        delta_t_13 = times[1][1] - times[1][0]
        # take different trace start times into account
        delta_t_02 -= (trace_start_times[0][1] - trace_start_times[0][0])
        delta_t_13 -= (trace_start_times[1][1] - trace_start_times[1][0])
        delta_t_02 *= sampling_rate
        delta_t_13 *= sampling_rate
        pos_02 = int(corr_02.shape[0] / 2 - delta_t_02)
        pos_13 = int(corr_13.shape[0] / 2 - delta_t_13)

        # weight_02 = np.sum(corr_02 ** 2)  # Normalize crosscorrelation
        # weight_13 = np.sum(corr_13 ** 2)
        # likelihood = -1 * (corr_02[pos_02] ** 2 / weight_02 + corr_13[pos_13] ** 2 / weight_13)
        # After deliberating a bit, I don't think we should use the square because anti-correlating
        # pulses would be wrong, given that it is not a continuous waveform

        weight_02 = np.sum(np.abs(corr_02))  # Normalize crosscorrelation
        weight_13 = np.sum(np.abs(corr_13))
        likelihood = -1 * (corr_02[pos_02] / weight_02 + corr_13[pos_13] / weight_13)
        return likelihood

    def ll_regular_station_fft(angles, corr_02_fft, corr_13_fft, sampling_rate, positions, trace_start_times):
        """
        Likelihood function for a four antenna ARIANNA station, using FFT convolution

        Using FFT convolution, has built-in wrap around, but ARIANNA signals are too short
        for it to be accurate; will show problems at zero time delay
        """
        zenith = angles[0]
        azimuth = angles[1]
        times = []

        for pos in positions:
            tmp = [geo_utl.get_time_delay_from_direction(zenith, azimuth, pos[0], n=n_index) * sampling_rate,
                   geo_utl.get_time_delay_from_direction(zenith, azimuth, pos[1], n=n_index) * sampling_rate]
            times.append(tmp)

        delta_t_02 = (times[0][1] + trace_start_times[0][1] * sampling_rate) - (times[0][0] + trace_start_times[0][0] * sampling_rate)
        delta_t_13 = (times[1][1] + trace_start_times[1][1] * sampling_rate) - (times[1][0] + trace_start_times[1][0] * sampling_rate)

        if delta_t_02 < 0:
            pos_02 = int(delta_t_02 + corr_02_fft.shape[0])
        else:
            pos_02 = int(delta_t_02)

        if delta_t_13 < 0:
            pos_13 = int(delta_t_13 + corr_13_fft.shape[0])
        else:
            pos_13 = int(delta_t_13)

        weight_02 = np.sum(np.abs(corr_02_fft))  # Normalize crosscorrelation
        weight_13 = np.sum(np.abs(corr_13_fft))

        likelihood = -1 * (np.abs(corr_02_fft[pos_02]) ** 2 / weight_02 + np.abs(corr_13_fft[pos_13]) ** 2 / weight_13)
        return likelihood

    station_id = station.get_id()
    positions_pairs = [[det.get_relative_position(station_id, channel_pairs[0][0]), det.get_relative_position(station_id, channel_pairs[0][1])],
                       [det.get_relative_position(station_id, channel_pairs[1][0]), det.get_relative_position(station_id, channel_pairs[1][1])]]
    sampling_rate = station.get_channel(0).get_sampling_rate()  # assume that channels have the same sampling rate
    trace_start_time_pairs = [[station.get_channel(channel_pairs[0][0]).get_trace_start_time(), station.get_channel(channel_pairs[0][1]).get_trace_start_time()],
                              [station.get_channel(channel_pairs[1][0]).get_trace_start_time(), station.get_channel(channel_pairs[1][1]).get_trace_start_time()]]

    # determine automatically if one channel has an inverted waveform with respect to the other
    signs = [1., 1.]
    for iPair, pair in enumerate(channel_pairs):
        antenna_type = det.get_antenna_type(station_id, pair[0])
        if "LPDA" in antenna_type:
            otheta, ophi, rot_theta, rot_azimuth = det.get_antenna_orientation(station_id, pair[0])
            otheta2, ophi2, rot_theta2, rot_azimuth2 = det.get_antenna_orientation(station_id, pair[1])
            if np.isclose(np.abs(rot_azimuth - rot_azimuth2), 180 * units.deg, atol=1 * units.deg):
                signs[iPair] = -1

    if use_correlation:
        # Correlation
        if not use_envelope:
            corr_02 = signal.correlate(station.get_channel(channel_pairs[0][0]).get_trace(),
                                       signs[0] * station.get_channel(channel_pairs[0][1]).get_trace())
            corr_13 = signal.correlate(station.get_channel(channel_pairs[1][0]).get_trace(),
                                       signs[1] * station.get_channel(channel_pairs[1][1]).get_trace())
        else:
            corr_02 = signal.correlate(np.abs(signal.hilbert(station.get_channel(channel_pairs[0][0]).get_trace())),
                                       np.abs(signal.hilbert(station.get_channel(channel_pairs[0][1]).get_trace())))
            corr_13 = signal.correlate(np.abs(signal.hilbert(station.get_channel(channel_pairs[1][0]).get_trace())),
                                       np.abs(signal.hilbert(station.get_channel(channel_pairs[1][1]).get_trace())))
    else:
        # FFT convolution
        corr_02_fft = fftpack.ifft(-1 * fftpack.fft(station.get_channel(channel_pairs[0][0]).get_trace()).conjugate() * fftpack.fft(station.get_channel(channel_pairs[0][1]).get_trace()))
        corr_13_fft = fftpack.ifft(-1 * fftpack.fft(station.get_channel(channel_pairs[1][0]).get_trace()).conjugate() * fftpack.fft(station.get_channel(channel_pairs[1][1]).get_trace()))

    if use_correlation:
        # Using correlation
        ll = opt.brute(
            ll_regular_station,
            ranges=(slice(ZenLim[0], ZenLim[1], 0.01), slice(AziLim[0], AziLim[1], 0.01)),
            args=(corr_02, corr_13, sampling_rate, positions_pairs, trace_start_time_pairs),
            full_output=True,
            finish=opt.fmin)  # slow but does the trick
    else:
        ll = opt.brute(
            ll_regular_station_fft,
            ranges=(slice(ZenLim[0], ZenLim[1], 0.05), slice(AziLim[0], AziLim[1], 0.05)),
            args=(corr_02_fft, corr_13_fft, sampling_rate, positions_pairs, trace_start_time_pairs),
            full_output=True,
            finish=opt.fmin)  # slow but does the trick

    if self.__debug:
        import peakutils
        zenith = ll[0][0]
        azimuth = ll[0][1]
        times = []

        for pos in positions_pairs:
            tmp = [geo_utl.get_time_delay_from_direction(zenith, azimuth, pos[0], n=n_index),
                   geo_utl.get_time_delay_from_direction(zenith, azimuth, pos[1], n=n_index)]
            times.append(tmp)

        delta_t_02 = times[0][1] - times[0][0]
        delta_t_13 = times[1][1] - times[1][0]
        # take different trace start times into account
        delta_t_02 -= (trace_start_time_pairs[0][1] - trace_start_time_pairs[0][0])
        delta_t_13 -= (trace_start_time_pairs[1][1] - trace_start_time_pairs[1][0])
        delta_t_02 *= sampling_rate
        delta_t_13 *= sampling_rate

        toffset = -(np.arange(0, corr_02.shape[0]) - corr_02.shape[0] / 2) / sampling_rate

        fig, (ax, ax2) = plt.subplots(2, 1, sharex=True)
        ax.plot(toffset, corr_02)
        ax.axvline(delta_t_02 / sampling_rate, label='time', c='k')
        indices = peakutils.indexes(corr_02, thres=0.8, min_dist=5)
        ax.plot(toffset[indices], corr_02[indices], 'o')
        imax = np.argmax(corr_02[indices])
        self.logger.debug("offset 02= {:.3f}".format(toffset[indices[imax]] - (delta_t_02 / sampling_rate)))

        ax2.plot(toffset, corr_13)
        indices = peakutils.indexes(corr_13, thres=0.8, min_dist=5)
        ax2.plot(toffset[indices], corr_13[indices], 'o')
        ax2.axvline(delta_t_13 / sampling_rate, label='time', c='k')
        ax2.set_xlabel("time")
        ax2.set_ylabel("Correlation Ch 1/ Ch3", fontsize='small')
        ax.set_ylabel("Correlation Ch 0/ Ch2", fontsize='small')
        plt.tight_layout()
        # plt.close("all")

    station[stnp.zenith] = max(ZenLim[0], min(ZenLim[1], ll[0][0]))
    station[stnp.azimuth] = ll[0][1]
    output_str = "reconstructed angles theta = {:.1f}, phi = {:.1f}".format(station[stnp.zenith] / units.deg, station[stnp.azimuth] / units.deg)

    if station.has_sim_station():
        sim_zen = None
        sim_az = None
        if station.get_sim_station().is_cosmic_ray():
            sim_zen = station.get_sim_station()[stnp.zenith]
            sim_az = station.get_sim_station()[stnp.azimuth]
        elif station.get_sim_station().is_neutrino():
            # in case of a neutrino simulation, each channel has a slightly different arrival direction -> compute the average
            sim_zen = []
            sim_az = []
            for efield in station.get_sim_station().get_electric_fields_for_channels(ray_path_type='direct'):
                sim_zen.append(efield[efp.zenith])
                sim_az.append(efield[efp.azimuth])
            sim_zen = np.array(sim_zen)
            sim_az = hp.get_normalized_angle(np.array(sim_az))
            ops = "average incident zenith {:.1f} +- {:.1f}".format(np.mean(sim_zen) / units.deg, np.std(sim_zen) / units.deg)
            ops += " (individual: "
            for x in sim_zen:
                ops += "{:.1f}, ".format(x / units.deg)
            ops += ")"
            self.logger.debug(ops)
            ops = "average incident azimuth {:.1f} +- {:.1f}".format(np.mean(sim_az) / units.deg, np.std(sim_az) / units.deg)
            ops += " (individual: "
            for x in sim_az:
                ops += "{:.1f}, ".format(x / units.deg)
            ops += ")"
            self.logger.debug(ops)
            sim_zen = np.mean(np.array(sim_zen))
            sim_az = np.mean(np.array(sim_az))

        if sim_zen is not None:
            dOmega = hp.get_angle(hp.spherical_to_cartesian(sim_zen, sim_az),
                                  hp.spherical_to_cartesian(station[stnp.zenith], station[stnp.azimuth]))
            output_str += " MC theta = {:.2f}, phi = {:.2f}, dOmega = {:.2f}, dZen = {:.1f}, dAz = {:.1f}".format(
                sim_zen / units.deg, hp.get_normalized_angle(sim_az) / units.deg, dOmega / units.deg,
                (station[stnp.zenith] - sim_zen) / units.deg,
                (station[stnp.azimuth] - hp.get_normalized_angle(sim_az)) / units.deg)
            self.__zenith.append(sim_zen)
            self.__azimuth.append(sim_az)
            self.__delta_zenith.append(station[stnp.zenith] - sim_zen)
            self.__delta_azimuth.append(station[stnp.azimuth] - hp.get_normalized_angle(sim_az))

    self.logger.info(output_str)
    # Still have to add fit quality parameter to output

    if self.__debug:
        import peakutils

        # access simulated efield and high level parameters
        sim_present = False
        if station.has_sim_station():
            if station.get_sim_station().has_parameter(stnp.zenith):
                sim_station = station.get_sim_station()
                azimuth_orig = sim_station[stnp.azimuth]
                zenith_orig = sim_station[stnp.zenith]
                sim_present = True
                self.logger.debug("True CoREAS zenith {0}, azimuth {1}".format(zenith_orig, azimuth_orig))
self.logger.debug("Result of direction fitting: [zenith, azimuth] {}".format(np.rad2deg(ll[0]))) # Show fit space zen = np.arange(ZenLim[0], ZenLim[1], 1 * units.deg) az = np.arange(AziLim[0], AziLim[1], 2 * units.deg) x_plot = np.zeros(zen.shape[0] * az.shape[0]) y_plot = np.zeros(zen.shape[0] * az.shape[0]) z_plot = np.zeros(zen.shape[0] * az.shape[0]) i = 0 for a in az: for z in zen: # Evaluate fit function for grid if use_correlation: z_plot[i] = ll_regular_station([z, a], corr_02, corr_13, sampling_rate, positions_pairs, trace_start_time_pairs) else: z_plot[i] = ll_regular_station_fft([z, a], corr_02_fft, corr_13_fft, sampling_rate, positions_pairs, trace_start_time_pairs) x_plot[i] = a y_plot[i] = z i += 1 fig, ax = plt.subplots(1, 1) ax.scatter(np.rad2deg(x_plot), np.rad2deg(y_plot), c=z_plot, cmap='gnuplot2_r', lw=0) # ax.imshow(z_plot, cmap='gnuplot2_r', extent=(0, 360, 90, 180)) if sim_present: ax.plot(np.rad2deg(hp.get_normalized_angle(azimuth_orig)), np.rad2deg(zenith_orig), marker='d', c='g', label="True") ax.scatter(np.rad2deg(ll[0][1]), np.rad2deg(ll[0][0]), marker='o', c='k', label='Fit') # ax.colorbar(label='Fit parameter') ax.set_ylabel('Zenith [rad]') ax.set_xlabel('Azimuth [rad]') plt.tight_layout() # plot allowed solution separately for each pair of channels toffset = -(np.arange(0, corr_02.shape[0]) - corr_02.shape[0] / 2.) / sampling_rate indices = peakutils.indexes(corr_02, thres=0.8, min_dist=5) t02s = toffset[indices][np.argsort(corr_02[indices])[::-1]] + (trace_start_time_pairs[0][1] - trace_start_time_pairs[0][0]) toffset = -(np.arange(0, corr_13.shape[0]) - corr_13.shape[0] / 2.) / sampling_rate indices = peakutils.indexes(corr_13, thres=0.8, min_dist=5) t13s = toffset[indices][np.argsort(corr_13[indices])[::-1]] + (trace_start_time_pairs[1][1] - trace_start_time_pairs[1][0]) from scipy import constants c = constants.c * units.m / units.s dx = -6 * units.m def get_deltat13(dt, phi): t = -1. 
        def get_deltat02(dt, phi):
            t = -1 * dt * c / (dx * np.sin(phi) * n_index)
            t[t < 0] = np.nan
            return np.arcsin(t)

        def getDeltaTCone(r, dt):
            dist = np.linalg.norm(r)
            t0 = -dist * n_index / c
            Phic = np.arccos(dt / t0)  # cone angle for allowable solutions
            self.logger.debug('dist = {}, dt = {}, t0 = {}, phic = {}'.format(dist, dt, t0, Phic))
            nr = r / dist  # normalize
            p = np.cross([0, 0, 1], nr)  # create a perpendicular normal vector to r
            p = p / np.linalg.norm(p)
            q = np.cross(nr, p)  # nr, p, and q form an orthonormal basis
            self.logger.debug('nr = {}\np = {}\nq = {}\n'.format(nr, p, q))
            ThetaC = np.linspace(0, 2 * np.pi, 1000)
            Phis = np.zeros(len(ThetaC))
            Thetas = np.zeros(len(ThetaC))
            for i, thetac in enumerate(ThetaC):
                # create a set of vectors that point along the cone defined by r and PhiC
                rc = nr + np.tan(Phic) * (np.sin(thetac) * p + np.cos(thetac) * q)
                nrc = rc / np.linalg.norm(rc)
                theta = np.arccos(nrc[2])
                phi = np.arctan2(nrc[1], nrc[0])
                Phis[i] = phi
                Thetas[i] = theta
            return Phis, Thetas

        # phis = np.deg2rad(np.linspace(0, 360, 10000))
        r0_2 = positions_pairs[0][1] - positions_pairs[0][0]  # vector pointing from Ch0 to Ch2
        r1_3 = positions_pairs[1][1] - positions_pairs[1][0]  # vector pointing from Ch1 to Ch3
        self.logger.debug('r02 {}\nr13 {}'.format(r0_2, r1_3))
        linestyles = ['-', '--', ':', '-.']
        for i, t02 in enumerate(t02s):
            # theta02 = get_deltat02(t02, phis)
            phi02, theta02 = getDeltaTCone(r0_2, t02)
            theta02[theta02 < 0] += np.pi
            phi02[phi02 < 0] += 2 * np.pi
            jumppos02 = np.where(np.abs(np.diff(phi02)) >= 5.0)
            for j, pos in enumerate(jumppos02):
                phi02 = np.insert(phi02, pos + 1 + j, np.nan)
                theta02 = np.insert(theta02, pos + 1 + j, np.nan)
            # mask02 = ~np.isnan(theta02)
            ax.plot(np.rad2deg(phi02), np.rad2deg(theta02), '{}C3'.format(linestyles[i % 4]), label='c 0+2 dt = {}'.format(t02))
        for i, t13 in enumerate(t13s):
            # theta13 = get_deltat13(t13, phis)
            phi13, theta13 = getDeltaTCone(r1_3, t13)
            theta13[theta13 < 0] += np.pi
            phi13[phi13 < 0] += 2 * np.pi
            jumppos13 = np.where(np.abs(np.diff(phi13)) >= 5.0)
            for j, pos in enumerate(jumppos13):
                phi13 = np.insert(phi13, pos + 1 + j, np.nan)
                theta13 = np.insert(theta13, pos + 1 + j, np.nan)
            # mask13 = ~np.isnan(theta13)
            ax.plot(np.rad2deg(phi13), np.rad2deg(theta13), '{}C2'.format(linestyles[i % 4]), label='c 1+3 dt = {}'.format(t13))
        ax.legend(fontsize='small')
        ax.set_ylim(ZenLim[0] / units.deg, ZenLim[1] / units.deg)
        ax.set_xlim(AziLim[0] / units.deg, AziLim[1] / units.deg)
def run(self, evt, station, det, debug=False):
    """
    reconstructs quantities for the electric field

    Parameters
    ----------
    evt: event
    station: station
    det: detector
    debug: bool
        set debug
    """
    for electric_field in station.get_electric_fields():
        trace_copy = copy.copy(electric_field.get_trace())

        # calculate hilbert envelope
        envelope = np.abs(signal.hilbert(trace_copy))
        envelope_mag = np.linalg.norm(envelope, axis=0)
        signal_time_bin = np.argmax(envelope_mag)
        signal_time = electric_field.get_times()[signal_time_bin]
        electric_field[efp.signal_time] = signal_time

        low_pos = int(130 * units.ns * electric_field.get_sampling_rate())
        up_pos = int(210 * units.ns * electric_field.get_sampling_rate())
        if debug:
            fig, ax = plt.subplots(1, 1)
            sc = ax.scatter(trace_copy[1, low_pos:up_pos], trace_copy[2, low_pos:up_pos],
                            c=electric_field.get_times()[low_pos:up_pos], s=5)
            fig.colorbar(sc, ax=ax)
            ax.set_aspect('equal')
            ax.set_xlabel("eTheta")
            ax.set_ylabel("ePhi")
            fig.tight_layout()

        low_pos, up_pos = hp.get_interval(envelope_mag, scale=0.5)
        v_start = trace_copy[:, signal_time_bin]
        v_avg = np.zeros(3)
        for i in range(low_pos, up_pos + 1):
            v = trace_copy[:, i]
            alpha = hp.get_angle(v_start, v)
            if alpha > 0.5 * np.pi:
                v *= -1
            v_avg += v
        pole = np.arctan2(np.abs(v_avg[2]), np.abs(v_avg[1]))
        electric_field[efp.polarization_angle] = pole
        logger.info("average e-field vector = {:.4g}, {:.4g}, {:.4g} -> polarization = {:.1f}deg".format(v_avg[0], v_avg[1], v_avg[2], pole / units.deg))
        trace = electric_field.get_trace()

        if debug:
            fig, ax = plt.subplots(1, 1)
            tt = electric_field.get_times()
            dt = 1. / electric_field.get_sampling_rate()
            ax.plot(tt / units.ns, trace[1] / units.mV * units.m)
            ax.plot(tt / units.ns, trace[2] / units.mV * units.m)
            ax.plot(tt / units.ns, envelope_mag / units.mV * units.m)
            ax.vlines([low_pos * dt, up_pos * dt], 0, envelope_mag.max() / units.mV * units.m)
            ax.vlines([signal_time - self.__signal_window_pre, signal_time + self.__signal_window_post], 0,
                      envelope_mag.max() / units.mV * units.m, linestyles='dashed')

        times = electric_field.get_times()
        mask_signal_window = (times > (signal_time - self.__signal_window_pre)) & (times < (signal_time + self.__signal_window_post))
        mask_noise_window = np.zeros_like(mask_signal_window, dtype=bool)
        if self.__noise_window > 0:
            # the last n bins
            mask_noise_window[int(np.round((-self.__noise_window - 141.) * electric_field.get_sampling_rate())):int(np.round(-141. * electric_field.get_sampling_rate()))] = np.ones(int(np.round(self.__noise_window * electric_field.get_sampling_rate())), dtype=bool)

        signal_energy_fluence = trace_utilities.get_electric_field_energy_fluence(trace, times, mask_signal_window, mask_noise_window)
        dt = times[1] - times[0]
        signal_energy_fluence_error = np.zeros(3)
        if np.sum(mask_noise_window):
            RMSNoise = np.sqrt(np.mean(trace[:, mask_noise_window] ** 2, axis=1))
            signal_energy_fluence_error = (4 * np.abs(signal_energy_fluence / self.__conversion_factor_integrated_signal) * RMSNoise ** 2 * dt
                                           + 2 * (self.__signal_window_pre + self.__signal_window_post) * RMSNoise ** 4 * dt) ** 0.5
            signal_energy_fluence_error *= self.__conversion_factor_integrated_signal

        electric_field.set_parameter(efp.signal_energy_fluence, signal_energy_fluence)
        electric_field.set_parameter_error(efp.signal_energy_fluence, signal_energy_fluence_error)
        logger.info("f = {} +- {}".format(signal_energy_fluence / units.eV * units.m2, signal_energy_fluence_error / units.eV * units.m2))

        # calculate polarization angle from energy fluence
        x = np.abs(signal_energy_fluence[1]) ** 0.5
        y = np.abs(signal_energy_fluence[2]) ** 0.5
        sx = signal_energy_fluence_error[1] * 0.5
        sy = signal_energy_fluence_error[2] * 0.5
        pol_angle = np.arctan2(y, x)
        pol_angle_error = 1. / (x ** 2 + y ** 2) * (y ** 2 * sx ** 2 + x ** 2 * sy ** 2) ** 0.5  # Gaussian error propagation
        logger.info("polarization angle = {:.1f} +- {:.1f}".format(pol_angle / units.deg, pol_angle_error / units.deg))
        electric_field.set_parameter(efp.polarization_angle, pol_angle)
        electric_field.set_parameter_error(efp.polarization_angle, pol_angle_error)

        # compute expected polarization
        site = det.get_site(station.get_id())
        exp_efield = hp.get_lorentzforce_vector(electric_field[efp.zenith], electric_field[efp.azimuth], hp.get_magnetic_field_vector(site))
        cs = coordinatesystems.cstrafo(electric_field[efp.zenith], electric_field[efp.azimuth], site=site)
        exp_efield_onsky = cs.transform_from_ground_to_onsky(exp_efield)
        exp_pol_angle = np.arctan2(np.abs(exp_efield_onsky[2]), np.abs(exp_efield_onsky[1]))
        logger.info("expected polarization angle = {:.1f}".format(exp_pol_angle / units.deg))
        electric_field.set_parameter(efp.polarization_angle_expectation, exp_pol_angle)
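# ---------------------------------------------------------------------------
# Self-contained sketch (made-up numbers, not part of the module above) of the
# fluence-based polarization angle and its Gaussian error propagation used above:
# pol_angle = arctan2(sqrt(f_phi), sqrt(f_theta)),
# sigma_pol = sqrt(y^2 sx^2 + x^2 sy^2) / (x^2 + y^2) with x = sqrt(f_theta), y = sqrt(f_phi).
import numpy as np

f_theta, f_phi = 4.0, 1.0            # placeholder energy fluences in the eTheta/ePhi components
sf_theta, sf_phi = 0.4, 0.2          # placeholder fluence uncertainties

x, y = np.sqrt(f_theta), np.sqrt(f_phi)
sx, sy = sf_theta * 0.5, sf_phi * 0.5            # mirrors the factor of 0.5 used in the module above
pol_angle = np.arctan2(y, x)
pol_angle_error = np.sqrt(y**2 * sx**2 + x**2 * sy**2) / (x**2 + y**2)
print(np.rad2deg(pol_angle), np.rad2deg(pol_angle_error))
# ---------------------------------------------------------------------------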