class LoneSignals(TreeMaker):
    __version__ = '0.2'
    extra_branches = ['peaks.*']
    extra_metadata = hax.config['corrections_definitions']
    corrections_handler = CorrectionsHandler()

    def extract_data(self, event):
        peaks = event.peaks
        if not len(peaks):
            return dict()

        s1_sorted = list(sorted([p for p in event.peaks if p.type == 's1' and p.detector == 'tpc'],
                                key=lambda p: p.area, reverse=True))
        s2_sorted = list(sorted([p for p in event.peaks if p.type == 's2' and p.detector == 'tpc'],
                                key=lambda p: p.area, reverse=True))
        unknown = [peak for peak in peaks if peak.type == 'unknown' and peak.detector == 'tpc']

        result = dict(n_pulses=event.n_pulses, n_peaks=len(peaks),
                      n_interactions=len(event.interactions))
        result['unknown_tot'] = np.sum([peak.area for peak in unknown])
        result['s1_area_tot'] = np.sum([peak.area for peak in s1_sorted])
        result['s2_area_tot'] = np.sum([peak.area for peak in s2_sorted])
        result['n_s1'] = len(s1_sorted)
        result['n_s2'] = len(s2_sorted)

        if len(s1_sorted):
            result['area_before_largest_s1'] = np.sum(
                [p.area for p in peaks if p.center_time < s1_sorted[0].center_time])
            s1_0_recpos = s1_sorted[0].reconstructed_positions
            for rp in s1_0_recpos:
                if rp.algorithm == 'PosRecTopPatternFit':
                    s1_0_recpos_pf = rp
                    result['s1_0_x'] = s1_0_recpos_pf.x
                    result['s1_0_y'] = s1_0_recpos_pf.y
                    result['s1_0_posrec_goodness_of_fit'] = s1_0_recpos_pf.goodness_of_fit

            result['s1_0_area'] = s1_sorted[0].area
            result['s1_0_center_time'] = s1_sorted[0].center_time
            result['s1_0_aft'] = s1_sorted[0].area_fraction_top
            result['s1_0_50p_width'] = s1_sorted[0].range_area_decile[5]
            result['s1_0_90p_width'] = s1_sorted[0].range_area_decile[9]
            result['s1_0_rise_time'] = -s1_sorted[0].area_decile_from_midpoint[1]
            result['s1_0_largest_hit_area'] = s1_sorted[0].largest_hit_area

        if len(s2_sorted) > 0:
            result['area_before_largest_s2'] = np.sum(
                [p.area for p in peaks if p.center_time < s2_sorted[0].center_time])
            s2_0_recpos = s2_sorted[0].reconstructed_positions
            for rp in s2_0_recpos:
                if rp.algorithm == 'PosRecTopPatternFit':
                    s2_0_recpos_pf = rp
                    result['s2_0_x'] = s2_0_recpos_pf.x
                    result['s2_0_y'] = s2_0_recpos_pf.y
                    result['s2_0_posrec_goodness_of_fit'] = s2_0_recpos_pf.goodness_of_fit
                if rp.algorithm == 'PosRecNeuralNet':
                    s2_0_recpos_pf = rp
                    result['s2_0_x_nn'] = s2_0_recpos_pf.x
                    result['s2_0_y_nn'] = s2_0_recpos_pf.y
                    result['s2_0_posrec_goodness_of_fit_nn'] = s2_0_recpos_pf.goodness_of_fit

            result['s2_0_area'] = s2_sorted[0].area
            result['s2_0_center_time'] = s2_sorted[0].center_time
            result['s2_0_left'] = s2_sorted[0].center_time - s2_sorted[0].left
            result['s2_0_aft'] = s2_sorted[0].area_fraction_top
            result['s2_0_50p_width'] = s2_sorted[0].range_area_decile[5]
            result['s2_0_rise_time'] = -s2_sorted[0].area_decile_from_midpoint[1]
            result['s2_0_largest_hit_area'] = s2_sorted[0].largest_hit_area

            # S2 corrections based on the (x, y) maps, for the new S2 AFT cut
            cvals = [result['s2_0_x_nn'], result['s2_0_y_nn']]
            result['s2_0_xy_correction_tot'] = (
                1.0 / self.corrections_handler.get_correction_from_map(
                    "s2_xy_map", self.run_number, cvals))
            result['s2_0_xy_correction_top'] = (
                1.0 / self.corrections_handler.get_correction_from_map(
                    "s2_xy_map", self.run_number, cvals, map_name='map_top'))
            result['s2_0_xy_correction_bottom'] = (
                1.0 / self.corrections_handler.get_correction_from_map(
                    "s2_xy_map", self.run_number, cvals, map_name='map_bottom'))
            result['cs2_0_tot'] = result['s2_0_area'] * result['s2_0_xy_correction_tot']
            result['cs2_0_top'] = result['s2_0_area'] * result['s2_0_xy_correction_top'] * result['s2_0_aft']
            result['cs2_0_bottom'] = (result['s2_0_area'] * result['s2_0_xy_correction_bottom'] *
                                      (1 - result['s2_0_aft']))
            result['cs2_0_aft'] = result['cs2_0_top'] / (result['cs2_0_top'] + result['cs2_0_bottom'])

        if len(s2_sorted) > 1:
            s2_1_recpos = s2_sorted[1].reconstructed_positions
            for rp in s2_1_recpos:
                if rp.algorithm == 'PosRecTopPatternFit':
                    s2_1_recpos_pf = rp
                    result['s2_1_x'] = s2_1_recpos_pf.x
                    result['s2_1_y'] = s2_1_recpos_pf.y
                    result['s2_1_posrec_goodness_of_fit'] = s2_1_recpos_pf.goodness_of_fit
                if rp.algorithm == 'PosRecNeuralNet':
                    s2_1_recpos_pf = rp
                    result['s2_1_x_nn'] = s2_1_recpos_pf.x
                    result['s2_1_y_nn'] = s2_1_recpos_pf.y
                    result['s2_1_posrec_goodness_of_fit_nn'] = s2_1_recpos_pf.goodness_of_fit

            result['s2_1_area'] = s2_sorted[1].area
            result['s2_1_center_time'] = s2_sorted[1].center_time
            result['s2_1_aft'] = s2_sorted[1].area_fraction_top
            result['s2_1_50p_width'] = s2_sorted[1].range_area_decile[5]
            result['s2_1_rise_time'] = -s2_sorted[1].area_decile_from_midpoint[1]
            result['s2_1_largest_hit_area'] = s2_sorted[1].largest_hit_area

        return result
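

# A minimal usage sketch for LoneSignals, not part of the treemaker itself.
# It assumes the standard hax workflow (hax.init already called for XENON1T)
# and an illustrative dataset name; hax.minitrees.load is the usual entry point.
def example_load_lone_signals(dataset):
    """Load the LoneSignals minitree for one dataset and return a few columns."""
    df = hax.minitrees.load(dataset, treemakers=['LoneSignals'])
    # Each row holds per-event peak counts and largest-S1/S2 properties
    return df[['event_number', 'n_s1', 'n_s2', 's1_area_tot', 's2_area_tot']]
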
class PositionReconstruction(TreeMaker):
    """Stores position-reconstruction-related variables.

    Provides:
    - s1_pattern_fit_hax: S1 pattern likelihood computed with corrected position and areas
    - s1_pattern_fit_hits_hax: S1 pattern likelihood computed with corrected position and hits
    - s1_pattern_fit_bottom_hax: S1 pattern likelihood computed with corrected position and bottom array area
    - s1_pattern_fit_bottom_hits_hax: S1 pattern likelihood computed with corrected position and bottom array hits
    - s1_area_fraction_top_probability_hax: S1 AFT p-value computed with corrected position
    - s1_area_fraction_top_probability_nothresh: same, but using area below S1=10 (instead of hits)
    - s1_area_fraction_top_binomial: binomial probability for the given S1 AFT
    - s1_area_fraction_top_binomial_nothresh: same, but using area below S1=10
    - x_observed_nn_tf: TensorFlow NN reconstructed x position
    - y_observed_nn_tf: TensorFlow NN reconstructed y position
    - r_observed_nn_tf: TensorFlow NN reconstructed r position
    - r_3d_nn_tf: the corrected interaction r coordinate (data-driven 3D FDC)
    - x_3d_nn_tf: the corrected interaction x coordinate (data-driven 3D FDC)
    - y_3d_nn_tf: the corrected interaction y coordinate (data-driven 3D FDC)
    - z_3d_nn_tf: the corrected interaction z coordinate (data-driven 3D FDC)
    - r_correction_3d_nn_tf: r_3d_nn_tf - r_observed_nn_tf
    - z_correction_3d_nn_tf: z_3d_nn_tf - z_observed
    - s1_area_upper_injection_fraction: S1 area fraction near the Rn220 injection points (near PMT 131)
    - s1_area_lower_injection_fraction: S1 area fraction near the Rn220 injection points (near PMT 243)
    - s2_pattern_fit_nn: S2 pattern fit using the NN position
    - s2_pattern_fit_tpf: S2 pattern fit using the TPF position
    """
    __version__ = '1.1'
    extra_branches = ['peaks.area_per_channel[260]',
                      'peaks.hits_per_channel[260]',
                      'peaks.n_saturated_per_channel[260]',
                      'peaks.n_hits', 'peaks.hits_fraction_top',
                      'peaks.reconstructed_positions',
                      'interactions.x', 'interactions.y', 'interactions.z']

    def __init__(self):
        hax.minitrees.TreeMaker.__init__(self)
        self.extra_metadata = hax.config['corrections_definitions']
        self.corrections_handler = CorrectionsHandler()

        # We need to pull some stuff from the pax config
        self.pax_config = load_configuration("XENON1T")
        self.tpc_channels = self.pax_config['DEFAULT']['channels_in_detector']['tpc']
        self.confused_s1_channels = []
        self.s1_statistic = (
            self.pax_config['BuildInteractions.BasicInteractionProperties']['s1_pattern_statistic']
        )

        qes = np.array(self.pax_config['DEFAULT']['quantum_efficiencies'])
        self.s1_pattern_fitter = PatternFitter(
            filename=utils.data_file_name(self.pax_config['WaveformSimulator']['s1_patterns_file']),
            zoom_factor=self.pax_config['WaveformSimulator'].get('s1_patterns_zoom_factor', 1),
            adjust_to_qe=qes[self.tpc_channels],
            default_errors=(self.pax_config['DEFAULT']['relative_qe_error'] +
                            self.pax_config['DEFAULT']['relative_gain_error'])
        )

        self.top_channels = self.pax_config['DEFAULT']['channels_top']
        self.ntop_pmts = len(self.top_channels)

        # Declare NN stuff
        self.tfnn_weights = None
        self.tfnn_model = None
        self.loaded_nn = None

        # Run doc
        self.loaded_run_doc = None
        self.run_doc = None

    def load_nn(self):
        """Load the TensorFlow NN files (skipped if already loaded)."""
        from keras.models import model_from_json

        # If we already loaded it up then skip
        if ((self.tfnn_weights == self.corrections_handler.get_misc_correction(
                "tfnn_weights", self.run_number)) and
                (self.tfnn_model == self.corrections_handler.get_misc_correction(
                    "tfnn_model", self.run_number))):
            return

        self.tfnn_weights = self.corrections_handler.get_misc_correction(
            "tfnn_weights", self.run_number)
        self.tfnn_model = self.corrections_handler.get_misc_correction(
            "tfnn_model", self.run_number)

        with open(utils.data_file_name(self.tfnn_model), 'r') as json_file_nn:
            loaded_model_json = json_file_nn.read()
        self.loaded_nn = model_from_json(loaded_model_json)

        # Get the bad-PMT list from the same JSON file
        with open(utils.data_file_name(self.tfnn_model), 'r') as json_file_nn:
            loaded_model_json_dict = json.load(json_file_nn)
        self.list_bad_pmts = loaded_model_json_dict['badPMTList']

        weights_file = utils.data_file_name(self.tfnn_weights)
        self.loaded_nn.load_weights(weights_file)

    def get_data(self, dataset, event_list=None):
        # If we do switch to the new NN later, get rid of this and use those positions directly!
        data, _ = hax.minitrees.load_single_dataset(dataset, ['Corrections', 'Fundamentals'])
        self.x = data.x_3d_nn.values
        self.y = data.y_3d_nn.values
        self.z = data.z_3d_nn.values
        self.indices = list(data.event_number.values)
        return hax.minitrees.TreeMaker.get_data(self, dataset, event_list)

    def load_run_doc(self, run):
        if run != self.loaded_run_doc:
            self.run_doc = get_run_info(run)
            self.loaded_run_doc = run

    def extract_data(self, event):
        event_data = {
            "s1_pattern_fit_hax": None,
            "s1_pattern_fit_hits_hax": None,
            "s1_pattern_fit_bottom_hax": None,
            "s1_pattern_fit_bottom_hits_hax": None,
            "s2_pattern_fit_nn": None,
            "s2_pattern_fit_tpf": None,
            "s1_area_fraction_top_probability_hax": None,
            "s1_area_fraction_top_probability_nothresh": None,
            "s1_area_fraction_top_binomial": None,
            "s1_area_fraction_top_binomial_nothresh": None,
            "x_observed_nn_tf": None,
            "y_observed_nn_tf": None,
            "r_observed_nn_tf": None,
            "x_3d_nn_tf": None,
            "y_3d_nn_tf": None,
            "r_3d_nn_tf": None,
            "z_3d_nn_tf": None,
            "r_correction_3d_nn_tf": None,
            "z_correction_3d_nn_tf": None,
            "s1_area_upper_injection_fraction": None,
            "s1_area_lower_injection_fraction": None
        }

        # We first need the positions. This minitree is only valid when loading
        # Corrections, since that is needed to get the corrected positions.
        if not len(event.interactions):
            return event_data

        event_num = event.event_number
        try:
            event_index = self.indices.index(event_num)
        except Exception:
            return event_data

        interaction = event.interactions[0]
        s1 = event.peaks[interaction.s1]
        s2 = event.peaks[interaction.s2]

        for rp in s2.reconstructed_positions:
            if rp.algorithm == "PosRecNeuralNet":
                event_data['s2_pattern_fit_nn'] = rp.goodness_of_fit
            elif rp.algorithm == "PosRecTopPatternFit":
                event_data['s2_pattern_fit_tpf'] = rp.goodness_of_fit

        # Position reconstruction based on the NN from TensorFlow.
        # First check for MC data, and avoid TensorFlow if MC.
        if not self.mc_data:  # Temporary for OSG production
            # Check that the correct NN is loaded, and change it if not
            self.load_nn()

            s2apc = np.array(list(s2.area_per_channel))
            s2apc_clean = []
            for ipmt, s2_t in enumerate(s2apc):
                if ipmt not in self.list_bad_pmts and ipmt < self.ntop_pmts:
                    s2apc_clean.append(s2_t)

            s2apc_clean = np.asarray(s2apc_clean)
            s2apc_clean_norm = s2apc_clean / s2apc_clean.sum()
            s2apc_clean_norm = s2apc_clean_norm.reshape(1, len(s2apc_clean_norm))

            predicted_xy_tensorflow = self.loaded_nn.predict(s2apc_clean_norm)
            event_data['x_observed_nn_tf'] = predicted_xy_tensorflow[0, 0] / 10.
            event_data['y_observed_nn_tf'] = predicted_xy_tensorflow[0, 1] / 10.
            event_data['r_observed_nn_tf'] = \
                np.sqrt(event_data['x_observed_nn_tf']**2 + event_data['y_observed_nn_tf']**2)

            # 3D FDC
            algo = 'nn_tf'
            z_observed = interaction.z - interaction.z_correction
            cvals = [event_data['x_observed_' + algo], event_data['y_observed_' + algo], z_observed]
            event_data['r_correction_3d_' + algo] = self.corrections_handler.get_correction_from_map(
                "fdc_3d_tfnn", self.run_number, cvals)
            event_data['r_3d_' + algo] = (event_data['r_observed_' + algo] +
                                          event_data['r_correction_3d_' + algo])
            event_data['x_3d_' + algo] = (event_data['x_observed_' + algo] *
                                          (event_data['r_3d_' + algo] /
                                           event_data['r_observed_' + algo]))
            event_data['y_3d_' + algo] = (event_data['y_observed_' + algo] *
                                          (event_data['r_3d_' + algo] /
                                           event_data['r_observed_' + algo]))

            if abs(z_observed) > abs(event_data['r_correction_3d_' + algo]):
                event_data['z_3d_' + algo] = -np.sqrt(z_observed ** 2 -
                                                      event_data['r_correction_3d_' + algo] ** 2)
            else:
                event_data['z_3d_' + algo] = z_observed

            event_data['z_correction_3d_' + algo] = event_data['z_3d_' + algo] - z_observed

        # S1 area fraction near the injection points for the Rn220 source
        area_upper_injection = (s1.area_per_channel[131] + s1.area_per_channel[138] +
                                s1.area_per_channel[146] + s1.area_per_channel[147])
        area_lower_injection = (s1.area_per_channel[236] + s1.area_per_channel[237] +
                                s1.area_per_channel[243])
        event_data['s1_area_upper_injection_fraction'] = area_upper_injection / s1.area
        event_data['s1_area_lower_injection_fraction'] = area_lower_injection / s1.area

        # We want the S1 AreaFractionTop probability
        aft_prob = self.corrections_handler.get_correction_from_map(
            "s1_aft_map", self.run_number,
            [self.x[event_index], self.y[event_index], self.z[event_index]])

        aft_args = aft_prob, s1.area, s1.area_fraction_top, s1.n_hits, s1.hits_fraction_top

        event_data['s1_area_fraction_top_probability_hax'] = s1_area_fraction_top_probability(*aft_args)
        event_data['s1_area_fraction_top_binomial'] = \
            s1_area_fraction_top_probability(*(aft_args + (10, 'pmf')))
        event_data['s1_area_fraction_top_probability_nothresh'] = \
            s1_area_fraction_top_probability(*(aft_args + (0,)))
        event_data['s1_area_fraction_top_binomial_nothresh'] = \
            s1_area_fraction_top_probability(*(aft_args + (0, 'pmf')))

        # Now do the S1 pattern fit
        apc = np.array(list(s1.area_per_channel))
        hpc = np.array(list(s1.hits_per_channel))

        # Get saturated channels
        confused_s1_channels = []
        self.load_run_doc(self.run_number)

        # The original s1 pattern calculation had a bug where dead PMTs were
        # included. They are not included here.
        for a, c in enumerate(self.run_doc['processor']['DEFAULT']['gains']):
            if c == 0:
                confused_s1_channels.append(a)
        for a, c in enumerate(s1.n_saturated_per_channel):
            if c > 0:
                confused_s1_channels.append(a)

        try:
            # Create a boolean PMT array for use in the likelihood calculation
            is_pmt_in = np.ones(len(self.tpc_channels), dtype=bool)  # Default True
            is_pmt_in[confused_s1_channels] = False  # Ignore dead and saturated channels

            event_data['s1_pattern_fit_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                apc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

            event_data['s1_pattern_fit_hits_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                hpc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

            # Switch to bottom PMTs only
            is_pmt_in[self.top_channels] = False

            event_data['s1_pattern_fit_bottom_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                apc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

            event_data['s1_pattern_fit_bottom_hits_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                hpc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

        except exceptions.CoordinateOutOfRangeException:
            # pax does this too. Happens when the event is outside the TPC (usually in z).
            return event_data

        return event_data
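

# A standalone sketch of the radial 3D FDC applied in PositionReconstruction
# above, for illustration only: given the map's radial shift dr at the observed
# position, r is shifted outward, x and y are rescaled to the new radius, and
# |z| is reduced so the corrected point stays consistent with the observed drift.
def example_fdc_3d(x_obs, y_obs, z_obs, dr):
    """Return (x, y, z) after a radial field-distortion correction dr."""
    r_obs = np.sqrt(x_obs ** 2 + y_obs ** 2)
    r_3d = r_obs + dr
    # Rescale x and y onto the corrected radius
    x_3d = x_obs * (r_3d / r_obs)
    y_3d = y_obs * (r_3d / r_obs)
    # z is only adjusted when the correction fits inside |z_obs|
    if abs(z_obs) > abs(dr):
        z_3d = -np.sqrt(z_obs ** 2 - dr ** 2)
    else:
        z_3d = z_obs
    return x_3d, y_3d, z_3d
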
class CorrectedDoubleS1Scatter(TreeMaker):
    """Applies high-level corrections which are used in DoubleS1Scatter analyses.

    Be careful: this treemaker was developed for the Kr83m analysis. It will
    probably need modifications for other analyses.

    The search for double scatter events (double decays, afterpulses, and
    anything else that gets in our way) was written by Ted Berger; if you have
    any questions, contact Ted Berger ([email protected]).

    The search proceeds as follows:
    * interaction[0] (int_0) provides s1_0 and s2_0
    * find an additional interaction (int_1) to provide s1_1 and s2_1
      - loop through interactions (interactions store s1/s2 pairs in descending
        size order, with s2s on the fast loop)
        Choice A) select the first interaction with s1 != s1_0 AND s2 != s2_0
        Choice B) if Choice A doesn't exist, select the first interaction with
                  s1 != s1_0 AND s2 == s2_0
        Choice C) if Choices A and B don't exist, ignore; this isn't a double
                  scatter event
    * int_0 and int_1 are ordered by s1.center_time into int_a and int_b
      (int_a has the s1 that happened first)

    The output provides the following variables attributed to specific peaks
    (s1_a, s2_a, s1_b, s2_b), as well as specific interactions (int_a, int_b).

    ### Peak Output (for PEAK in [s1_a, s2_a, s1_b, s2_b]):
    - PEAK: The uncorrected area in pe of the peak
    - PEAK_center_time: The center_time in ns of the peak

    ### Interaction Output (for INT in [int_a, int_b]):
    - INT_x_pax: The x-position of this interaction (primary algorithm chosen by
      pax, currently TopPatternFit)
    - INT_y_pax: The y-position of this interaction
    - INT_z_pax: The z-position of this interaction
    - INT_z_observed: The z-position of this interaction without correction
    - INT_drift_time: The drift time of the interaction
    - INT_x_nn: The x-position of this interaction from the NeuralNet analysis
    - INT_y_nn: The y-position of this interaction from the NeuralNet analysis
    - INT_r_nn: The r-position of this interaction from the NeuralNet analysis

    ### Corrected Signal Output (for INT in [int_a, int_b]):
    # Data-driven 3D position correction
    - INT_r_correction_3d_nn: correction value for the r position using the NN
    - INT_r_3d_nn: The corrected interaction r coordinate using the NN
    - INT_x_3d_nn: The corrected interaction x coordinate using the NN
    - INT_y_3d_nn: The corrected interaction y coordinate using the NN
    - INT_z_correction_3d_nn: correction value for the z position using the NN
    - INT_z_3d_nn: The corrected interaction z coordinate using the NN
    # LCE correction on S1 using the NN FDC xyz-corrected position
    - s1_INT_xyz_correction_nn_fdc_3d: correction value for s1 signals using the
      INT_nn position
      /!\ There are two ways to correct the S1_b signal:
      - use the int_a position to correct s1_b, since S1_a and S1_b are close
        in time: this is the default
      - use the int_b position to correct s1_b (but most of the time the s2_b
        signal, and thus the z position, is badly reconstructed)
    - cS1_a and cS1_b: the corrected s1_a and s1_b signals using the
      int_a_3d_nn corrected position
    - cS1_b_int_b: the corrected s1_b signal using the int_b_3d_nn corrected position

    ### DoubleScatter Specific Output:
    - ds_s1_b_n_distinct_channels: number of PMTs contributing to s1_b distinct
      from the PMTs that contributed to s1_a
    - ds_s1_dt: delay time between s1_a_center_time and s1_b_center_time
    - ds_second_s2: 1 if the selected interactions have distinct s2s
    """
    __version__ = '2.0'
    extra_branches = ['peaks.n_contributing_channels', 'peaks.center_time',
                      'peaks.s2_saturation_correction',
                      'interactions.s2_lifetime_correction',
                      'peaks.area_fraction_top', 'peaks.area',
                      'peaks.reconstructed_positions*',
                      'interactions.x', 'interactions.y', 'interactions.z',
                      'interactions.r_correction', 'interactions.z_correction',
                      'interactions.drift_time', 'start_time',
                      'peaks.hits*', 'interactions.s1_pattern_fit']
    extra_metadata = hax.config['corrections_definitions']
    corrections_handler = CorrectionsHandler()

    def extract_data(self, event):
        result = dict()

        # If there are no interactions, we cannot do anything
        if not len(event.interactions):
            return result

        # Shortcuts for pax classes
        peaks = event.peaks
        interactions = event.interactions

        # Select interactions for a DoubleScatter event:
        # assume one scatter is interactions[0]
        int_0 = 0
        s1_0 = interactions[int_0].s1
        s2_0 = interactions[int_0].s2

        # Find another scatter
        otherInts = [0, 0]
        for i, interaction in enumerate(interactions):
            if interaction.s1 != s1_0 and interaction.s2 == s2_0 and otherInts[0] == 0:
                otherInts[0] = i
            elif interaction.s1 != s1_0 and interaction.s2 != s2_0 and otherInts[1] == 0:
                otherInts[1] = i

        # Distinction between single and double S2 scatters:
        # cut events without a second S1
        if otherInts[1] != 0:
            s1_1 = interactions[otherInts[1]].s1
            s2_1 = interactions[otherInts[1]].s2
            int_1 = otherInts[1]
            ds_second_s2 = 1
        elif otherInts[0] != 0:
            s1_1 = interactions[otherInts[0]].s1
            s2_1 = interactions[otherInts[0]].s2
            int_1 = otherInts[0]
            ds_second_s2 = 0
        else:
            return dict()

        # Order s1s/interactions by time
        if peaks[s1_0].center_time <= peaks[s1_1].center_time:
            s1_a = s1_0
            s1_b = s1_1
            s2_a = s2_0
            s2_b = s2_1
            int_a = int_0
            int_b = int_1
        else:
            s1_a = s1_1
            s1_b = s1_0
            s2_a = s2_1
            s2_b = s2_0
            int_a = int_1
            int_b = int_0

        # Additional s1s and s2s removed! See v0.1.0
        result['s1_a'] = peaks[s1_a].area
        result['s1_a_center_time'] = peaks[s1_a].center_time
        result['s1_a_area_fraction_top'] = peaks[s1_a].area_fraction_top
        result['s2_a'] = peaks[s2_a].area
        result['s2_a_center_time'] = peaks[s2_a].center_time
        result['s2_a_bottom'] = (1.0 - peaks[s2_a].area_fraction_top) * peaks[s2_a].area
        result['s2_a_area_fraction_top'] = peaks[s2_a].area_fraction_top

        result['s1_b'] = peaks[s1_b].area
        result['s1_b_center_time'] = peaks[s1_b].center_time
        result['s1_b_area_fraction_top'] = peaks[s1_b].area_fraction_top
        result['s2_b'] = peaks[s2_b].area
        result['s2_b_center_time'] = peaks[s2_b].center_time
        result['s2_b_bottom'] = (1.0 - peaks[s2_b].area_fraction_top) * peaks[s2_b].area
        result['s2_b_area_fraction_top'] = peaks[s2_b].area_fraction_top
        result['ds_second_s2'] = ds_second_s2

        # Drift time
        result['int_a_drift_time'] = result['s2_a_center_time'] - result['s1_a_center_time']
        result['int_b_drift_time'] = result['s2_b_center_time'] - result['s1_b_center_time']

        # pax position (TPF)
        result['int_a_x_pax'] = interactions[int_a].x
        result['int_a_y_pax'] = interactions[int_a].y
        result['int_a_z_pax'] = interactions[int_a].z
        result['int_b_x_pax'] = interactions[int_b].x
        result['int_b_y_pax'] = interactions[int_b].y
        result['int_b_z_pax'] = interactions[int_b].z

        # Compute DoubleScatter-specific variables:
        # select the largest hit on each channel in the s1_a and s1_b peaks
        s1_a_hitChannels = []
        s1_a_hitAreas = []
        for hit in peaks[s1_a].hits:
            if hit.is_rejected:
                continue
            if hit.channel not in s1_a_hitChannels:
                s1_a_hitChannels.append(hit.channel)
                s1_a_hitAreas.append(hit.area)
            else:
                hitChannel_i = s1_a_hitChannels.index(hit.channel)
                if hit.area > s1_a_hitAreas[hitChannel_i]:
                    s1_a_hitAreas[hitChannel_i] = hit.area

        s1_b_hitChannels = []
        s1_b_hitAreas = []
        for hit in peaks[s1_b].hits:
            if hit.is_rejected:
                continue
            if hit.channel not in s1_b_hitChannels:
                s1_b_hitChannels.append(hit.channel)
                s1_b_hitAreas.append(hit.area)
            else:
                hitChannel_i = s1_b_hitChannels.index(hit.channel)
                if hit.area > s1_b_hitAreas[hitChannel_i]:
                    s1_b_hitAreas[hitChannel_i] = hit.area

        # Count largest-hit channels in s1_b distinct from s1_a
        ds_s1_b_n_distinct_channels = 0
        for i, channel in enumerate(s1_b_hitChannels):
            if channel not in s1_a_hitChannels:
                ds_s1_b_n_distinct_channels += 1
        result['ds_s1_b_n_distinct_channels'] = ds_s1_b_n_distinct_channels
        result['ds_s1_dt'] = peaks[s1_b].center_time - peaks[s1_a].center_time

        # Need the observed ('uncorrected') position.
        # pax Interaction positions are corrected, so look up the
        # uncorrected positions in the ReconstructedPosition objects
        for rp in peaks[s2_a].reconstructed_positions:
            if rp.algorithm == 'PosRecNeuralNet':
                result['int_a_x_nn'] = rp.x
                result['int_a_y_nn'] = rp.y
                result['int_a_r_nn'] = np.sqrt(rp.x ** 2 + rp.y ** 2)
                int_a_x_observed = rp.x
                int_a_y_observed = rp.y
        for rp in peaks[s2_b].reconstructed_positions:
            if rp.algorithm == 'PosRecNeuralNet':
                result['int_b_x_nn'] = rp.x
                result['int_b_y_nn'] = rp.y
                result['int_b_r_nn'] = np.sqrt(rp.x ** 2 + rp.y ** 2)

        int_a_z = interactions[int_a].z - interactions[int_a].z_correction
        result['int_a_z_observed'] = int_a_z
        int_b_z = interactions[int_b].z - interactions[int_b].z_correction
        result['int_b_z_observed'] = int_b_z

        # Correct S2_a. No correction for S2_b, because S2_b is mostly
        # background events, or S2_b == S2_a.
        cvals = [int_a_x_observed, int_a_y_observed]
        result['s2_a_xy_correction_tot'] = (1.0 / self.corrections_handler.get_correction_from_map(
            "s2_xy_map", self.run_number, cvals))
        result['s2_a_xy_correction_top'] = (1.0 / self.corrections_handler.get_correction_from_map(
            "s2_xy_map", self.run_number, cvals, map_name='map_top'))
        result['s2_a_xy_correction_bottom'] = (1.0 / self.corrections_handler.get_correction_from_map(
            "s2_xy_map", self.run_number, cvals, map_name='map_bottom'))

        # Include the electron lifetime correction
        result['s2_lifetime_correction'] = (
            self.corrections_handler.get_electron_lifetime_correction(
                self.run_number, self.run_start, result['int_a_drift_time'], self.mc_data))

        # Combine all the S2 corrections for S2_a
        s2_a_correction = (result['s2_lifetime_correction'] * result['s2_a_xy_correction_tot'])
        s2_a_top_correction = (result['s2_lifetime_correction'] * result['s2_a_xy_correction_top'])
        s2_a_bottom_correction = (result['s2_lifetime_correction'] * result['s2_a_xy_correction_bottom'])

        result['cs2_a'] = peaks[s2_a].area * s2_a_correction
        result['cs2_a_top'] = peaks[s2_a].area * peaks[s2_a].area_fraction_top * s2_a_top_correction
        result['cs2_a_bottom'] = peaks[s2_a].area * (1.0 - peaks[s2_a].area_fraction_top) * s2_a_bottom_correction

        # FDC: apply the (new) 3D data-driven FDC, using NN positions
        algo = 'nn'

        # int_a position
        cvals = [result['int_a_x_' + algo], result['int_a_y_' + algo], int_a_z]
        result['int_a_r_correction_3d_' + algo] = self.corrections_handler.get_correction_from_map(
            "fdc_3d", self.run_number, cvals)
        result['int_a_r_3d_' + algo] = result['int_a_r_' + algo] + result['int_a_r_correction_3d_' + algo]
        result['int_a_x_3d_' + algo] = \
            result['int_a_x_' + algo] * (result['int_a_r_3d_' + algo] / result['int_a_r_' + algo])
        result['int_a_y_3d_' + algo] = \
            result['int_a_y_' + algo] * (result['int_a_r_3d_' + algo] / result['int_a_r_' + algo])
        if abs(int_a_z) > abs(result['int_a_r_correction_3d_' + algo]):
            result['int_a_z_3d_' + algo] = -np.sqrt(int_a_z ** 2 -
                                                    result['int_a_r_correction_3d_' + algo] ** 2)
        else:
            result['int_a_z_3d_' + algo] = int_a_z
        result['int_a_z_correction_3d_' + algo] = result['int_a_z_3d_' + algo] - int_a_z

        # int_b position
        cvals = [result['int_b_x_' + algo], result['int_b_y_' + algo], int_b_z]
        result['int_b_r_correction_3d_' + algo] = self.corrections_handler.get_correction_from_map(
            "fdc_3d", self.run_number, cvals)
        result['int_b_r_3d_' + algo] = result['int_b_r_' + algo] + result['int_b_r_correction_3d_' + algo]
        result['int_b_x_3d_' + algo] = \
            result['int_b_x_' + algo] * (result['int_b_r_3d_' + algo] / result['int_b_r_' + algo])
        result['int_b_y_3d_' + algo] = \
            result['int_b_y_' + algo] * (result['int_b_r_3d_' + algo] / result['int_b_r_' + algo])
        if abs(int_b_z) > abs(result['int_b_r_correction_3d_' + algo]):
            result['int_b_z_3d_' + algo] = -np.sqrt(int_b_z ** 2 -
                                                    result['int_b_r_correction_3d_' + algo] ** 2)
        else:
            result['int_b_z_3d_' + algo] = int_b_z
        result['int_b_z_correction_3d_' + algo] = result['int_b_z_3d_' + algo] - int_b_z

        # Apply the LCE (light collection efficiency) correction to S1,
        # using the int_a position
        cvals = [result['int_a_x_3d_nn'], result['int_a_y_3d_nn'], result['int_a_z_3d_nn']]

        # Old LCE (without field correction)
        result['s1_int_a_xyz_correction_nn_fdc_3d'] = (
            1 / self.corrections_handler.get_correction_from_map(
                "s1_lce_map_nn_fdc_3d", self.run_number, cvals))
        result['cs1_a_no_field_corr'] = peaks[s1_a].area * result['s1_int_a_xyz_correction_nn_fdc_3d']
        result['cs1_b_no_field_corr'] = peaks[s1_b].area * result['s1_int_a_xyz_correction_nn_fdc_3d']

        # New corrected LCE (including field effects)
        result['s1_int_a_xyz_true_correction_nn_fdc_3d'] = (
            1 / self.corrections_handler.get_correction_from_map(
                "s1_corrected_lce_map_nn_fdc_3d", self.run_number, cvals))
        result['cs1_a'] = peaks[s1_a].area * result['s1_int_a_xyz_true_correction_nn_fdc_3d']
        result['cs1_b'] = peaks[s1_b].area * result['s1_int_a_xyz_true_correction_nn_fdc_3d']

        # Correction of S1_b using the int_b position
        cvals = [result['int_b_x_3d_nn'], result['int_b_y_3d_nn'], result['int_b_z_3d_nn']]

        # Old LCE (without field correction)
        result['s1_int_b_xyz_correction_nn_fdc_3d'] = (
            1 / self.corrections_handler.get_correction_from_map(
                "s1_lce_map_nn_fdc_3d", self.run_number, cvals))
        result['cs1_b_int_b_no_field_corr'] = peaks[s1_b].area * result['s1_int_b_xyz_correction_nn_fdc_3d']

        # New corrected LCE (including field effects)
        result['s1_int_b_xyz_true_correction_nn_fdc_3d'] = (
            1 / self.corrections_handler.get_correction_from_map(
                "s1_corrected_lce_map_nn_fdc_3d", self.run_number, cvals))
        result['cs1_b_int_b'] = peaks[s1_b].area * result['s1_int_b_xyz_true_correction_nn_fdc_3d']

        return result
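

# A standalone sketch of the double-scatter pairing rule documented in the
# CorrectedDoubleS1Scatter docstring (Choices A/B/C), for illustration: 'pairs'
# is a hypothetical list of (s1_index, s2_index) tuples in pax's interaction
# order, with pairs[0] playing the role of interaction[0].
def example_select_second_interaction(pairs):
    """Return (index of the second scatter, ds_second_s2 flag), or None."""
    s1_0, s2_0 = pairs[0]
    choice_a = choice_b = None
    for i, (s1_i, s2_i) in enumerate(pairs):
        if s1_i != s1_0 and s2_i != s2_0 and choice_a is None:
            choice_a = i  # Choice A: distinct s1 and s2
        elif s1_i != s1_0 and s2_i == s2_0 and choice_b is None:
            choice_b = i  # Choice B: distinct s1, shared s2
    if choice_a is not None:
        return choice_a, 1  # ds_second_s2 = 1: distinct s2s
    if choice_b is not None:
        return choice_b, 0  # ds_second_s2 = 0: shared s2
    return None  # Choice C: not a double scatter event
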
class Corrections(TreeMaker):
    """Applies high-level corrections which are used in standard analyses.

    Provides:
    - Corrected S1, containing the xyz-correction:
      - cs1: The corrected area in pe of the main interaction's S1, using the NN 3D FDC
      - cs1_tpf_2dfdc: Same, but for the TPF 2D FDC
    - Corrected S2, containing the xy-correction and electron lifetime:
      - cs2: The corrected area in pe of the main interaction's S2
      - cs2_top: The corrected area in pe of the main interaction's S2 from the top array
      - cs2_bottom: The corrected area in pe of the main interaction's S2 from the bottom array
    - Observed positions, not corrected with FDC maps, for both NN and TPF:
      - r_observed_tpf: the observed interaction r coordinate (using TPF)
      - x_observed_tpf: the observed interaction x coordinate (using TPF)
      - y_observed_tpf: the observed interaction y coordinate (using TPF)
      - r_observed_nn: the observed interaction r coordinate (using NN)
      - x_observed_nn: the observed interaction x coordinate (using NN)
      - y_observed_nn: the observed interaction y coordinate (using NN)
      - z_observed: the observed interaction z coordinate (before the r, z correction)
    - Position correction, based on TPF and the (old) 2D FDC map:
      - r: the corrected interaction r coordinate
      - x: the corrected interaction x coordinate
      - y: the corrected interaction y coordinate
      - z: the corrected interaction z coordinate
    - Data-driven 3D position correction, applied to both NN and TPF observed positions:
      - r_3d_nn: the corrected interaction r coordinate (using NN)
      - x_3d_nn: the corrected interaction x coordinate (using NN)
      - y_3d_nn: the corrected interaction y coordinate (using NN)
      - z_3d_nn: the corrected interaction z coordinate (using NN)
      - r_3d_tpf: the corrected interaction r coordinate (using TPF)
      - x_3d_tpf: the corrected interaction x coordinate (using TPF)
      - y_3d_tpf: the corrected interaction y coordinate (using TPF)
      - z_3d_tpf: the corrected interaction z coordinate (using TPF)
    - Correction values for 'un-doing' single corrections:
      - s1_xyz_correction_tpf_fdc_2d
      - s1_xyz_correction_nn_fdc_3d
      - s2_xy_correction_tot
      - s2_xy_correction_top
      - s2_xy_correction_bottom
      - s2_lifetime_correction
      - r_correction_3d_nn
      - r_correction_3d_tpf
      - r_correction_2d
      - z_correction_3d_nn
      - z_correction_3d_tpf
      - z_correction_2d

    Notes:
    - The cs2, cs2_top and cs2_bottom variables are corrected for electron
      lifetime and x, y dependence.
    """
    __version__ = '1.8'
    extra_branches = ['peaks.s2_saturation_correction',
                      'interactions.s2_lifetime_correction',
                      'peaks.area_fraction_top', 'peaks.area',
                      'peaks.reconstructed_positions*',
                      'interactions.x', 'interactions.y', 'interactions.z',
                      'interactions.r_correction', 'interactions.z_correction',
                      'interactions.drift_time', 'start_time']
    extra_metadata = hax.config['corrections_definitions']
    corrections_handler = CorrectionsHandler()

    def extract_data(self, event):
        result = dict()

        # If there are no interactions, we cannot do anything
        if not len(event.interactions):
            return result

        # Workaround for the blinding cut: S2 area and largest_other_s2 are needed.
        interaction = event.interactions[0]
        s2 = event.peaks[interaction.s2]
        s1 = event.peaks[interaction.s1]

        largest_other_indices = get_largest_indices(
            event.peaks, exclude_indices=(interaction.s1, interaction.s2))
        largest_area_of_type = {ptype: event.peaks[i].area
                                for ptype, i in largest_other_indices.items()}
        result['largest_other_s2'] = largest_area_of_type.get('s2', 0)
        result['s2'] = s2.area

        # Need the observed ('uncorrected') position.
        # pax Interaction positions are corrected, so look up the
        # uncorrected positions in the ReconstructedPosition objects
        for rp in s2.reconstructed_positions:
            if rp.algorithm == 'PosRecNeuralNet':
                result['x_observed_nn'] = rp.x
                result['y_observed_nn'] = rp.y
                result['r_observed_nn'] = np.sqrt(rp.x**2 + rp.y**2)
            if rp.algorithm == 'PosRecTopPatternFit':
                result['x_observed_tpf'] = rp.x
                result['y_observed_tpf'] = rp.y
                result['r_observed_tpf'] = np.sqrt(rp.x**2 + rp.y**2)
                r_observed = np.sqrt(rp.x**2 + rp.y**2)
                result['r_observed'] = r_observed
                x_observed = rp.x
                y_observed = rp.y

        z_observed = interaction.z - interaction.z_correction
        result['z_observed'] = z_observed

        # Correct S2
        cvals = [x_observed, y_observed]
        result['s2_xy_correction_tot'] = (
            1.0 / self.corrections_handler.get_correction_from_map(
                "s2_xy_map", self.run_number, cvals))
        result['s2_xy_correction_top'] = (
            1.0 / self.corrections_handler.get_correction_from_map(
                "s2_xy_map", self.run_number, cvals, map_name='map_top'))
        result['s2_xy_correction_bottom'] = (
            1.0 / self.corrections_handler.get_correction_from_map(
                "s2_xy_map", self.run_number, cvals, map_name='map_bottom'))

        # Include the electron lifetime correction
        if self.mc_data:
            wanted_electron_lifetime = self.corrections_handler.get_misc_correction(
                "mc_electron_lifetime_liquid", self.run_number)
            result['s2_lifetime_correction'] = np.exp(
                (interaction.drift_time / 1e3) / wanted_electron_lifetime)
        else:
            try:
                result['s2_lifetime_correction'] = (
                    self.corrections_handler.get_electron_lifetime_correction(
                        self.run_start, interaction.drift_time))
            except Exception as e:
                print(e)
                result['s2_lifetime_correction'] = 1.

        # Combine all the S2 corrections
        s2_correction = (result['s2_lifetime_correction'] * result['s2_xy_correction_tot'])
        s2_top_correction = (result['s2_lifetime_correction'] * result['s2_xy_correction_top'])
        s2_bottom_correction = (result['s2_lifetime_correction'] * result['s2_xy_correction_bottom'])

        result['cs2'] = s2.area * s2_correction
        result['cs2_top'] = s2.area * s2.area_fraction_top * s2_top_correction
        result['cs2_bottom'] = s2.area * (1.0 - s2.area_fraction_top) * s2_bottom_correction

        # FDC: apply the (old) 2D FDC (field distortion correction to position).
        # Because we have different 2D correction maps for different runs, we need
        # to reapply the 2D FDC here (otherwise we could simply take the Interaction
        # positions, which already have the 2D FDC applied).
        result['r_correction_2d'] = self.corrections_handler.get_correction_from_map(
            "fdc_2d", self.run_number, [r_observed, z_observed], map_name='to_true_r')
        result['z_correction_2d'] = self.corrections_handler.get_correction_from_map(
            "fdc_2d", self.run_number, [r_observed, z_observed], map_name='to_true_z')

        result['r'] = r_observed + result['r_correction_2d']
        result['x'] = (result['r'] / result['r_observed']) * x_observed
        result['y'] = (result['r'] / result['r_observed']) * y_observed
        result['z'] = z_observed + result['z_correction_2d']

        # FDC: apply the (new) 3D data-driven FDC, using NN and TPF positions
        for algo in ['nn', 'tpf']:
            cvals = [result['x_observed_' + algo], result['y_observed_' + algo], z_observed]
            result['r_correction_3d_' + algo] = self.corrections_handler.get_correction_from_map(
                "fdc_3d", self.run_number, cvals)
            result['r_3d_' + algo] = result['r_observed_' + algo] + result['r_correction_3d_' + algo]
            result['x_3d_' + algo] = \
                result['x_observed_' + algo] * (result['r_3d_' + algo] / result['r_observed_' + algo])
            result['y_3d_' + algo] = \
                result['y_observed_' + algo] * (result['r_3d_' + algo] / result['r_observed_' + algo])
            if abs(z_observed) > abs(result['r_correction_3d_' + algo]):
                result['z_3d_' + algo] = -np.sqrt(z_observed**2 -
                                                  result['r_correction_3d_' + algo]**2)
            else:
                result['z_3d_' + algo] = z_observed
            result['z_correction_3d_' + algo] = result['z_3d_' + algo] - z_observed

        # Apply the LCE (light collection efficiency) correction to S1
        cvals = [result['x'], result['y'], result['z']]
        result['s1_xyz_correction_tpf_fdc_2d'] = (
            1 / self.corrections_handler.get_correction_from_map(
                "s1_lce_map_tpf_fdc_2d", self.run_number, cvals))
        result['cs1_tpf_2dfdc'] = s1.area * result['s1_xyz_correction_tpf_fdc_2d']

        cvals = [result['x_3d_nn'], result['y_3d_nn'], result['z_3d_nn']]
        result['s1_xyz_correction_nn_fdc_3d'] = (
            1 / self.corrections_handler.get_correction_from_map(
                "s1_lce_map_nn_fdc_3d", self.run_number, cvals))
        result['cs1'] = s1.area * result['s1_xyz_correction_nn_fdc_3d']

        return result
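

# A worked example of how the S2 corrections in Corrections combine, with
# made-up numbers: an S2 of 1000 pe at a position where the xy map reads 0.95
# (5% less light collected than average) and a lifetime correction of 1.10
# gives cs2 = 1000 * (1 / 0.95) * 1.10 ~= 1158 pe. For brevity this uses one
# map value for all three outputs; the treemaker uses separate top/bottom maps.
def example_combine_s2_corrections(s2_area, s2_aft, xy_map_value, lifetime_correction):
    """Return (cs2, cs2_top, cs2_bottom) for illustrative correction inputs."""
    xy_correction = 1.0 / xy_map_value
    cs2 = s2_area * lifetime_correction * xy_correction
    cs2_top = s2_area * s2_aft * lifetime_correction * xy_correction
    cs2_bottom = s2_area * (1.0 - s2_aft) * lifetime_correction * xy_correction
    return cs2, cs2_top, cs2_bottom
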
class CorrectedDoubleS1Scatter(TreeMaker):
    """Applies high-level corrections which are used in DoubleS1Scatter analyses.

    Be careful: this treemaker was developed for the Kr83m analysis. It will
    probably need modifications for other analyses.

    The search for double scatter events (double decays, afterpulses, and
    anything else that gets in our way) was written by Ted Berger; if you have
    any questions, contact Ted Berger ([email protected]).

    The search proceeds as follows:
    * interaction[0] (int_0) provides s1_0 and s2_0
    * find an additional interaction (int_1) to provide s1_1 and s2_1
      - loop through interactions (interactions store s1/s2 pairs in descending
        size order, with s2s on the fast loop)
        Choice A) select the first interaction with s1 != s1_0 AND s2 != s2_0
        Choice B) if Choice A doesn't exist, select the first interaction with
                  s1 != s1_0 AND s2 == s2_0
        Choice C) if Choices A and B don't exist, ignore; this isn't a double
                  scatter event
    * int_0 and int_1 are ordered by s1.center_time into int_a and int_b
      (int_a has the s1 that happened first)

    The output provides the following variables attributed to specific peaks
    (s1_a, s2_a, s1_b, s2_b), as well as specific interactions (int_a, int_b).

    ### Peak Output (for PEAK in [s1_a, s2_a, s1_b, s2_b]):
    - PEAK: The uncorrected area in pe of the peak
    - PEAK_center_time: The center_time in ns of the peak

    ### DoubleScatter Specific Output:
    - ds_s1_b_n_distinct_channels: number of PMTs contributing to s1_b distinct
      from the PMTs that contributed to s1_a
    - ds_s1_dt: delay time between s1_a_center_time and s1_b_center_time
    - ds_second_s2: 1 if the selected interactions have distinct s2s

    ### Position Corrections: same as the Corrections minitree, but for the
        int_a and int_b positions

    ### Signal Corrections: same as the Corrections minitree, using the int_a
        positions for position-dependent corrections
    """
    __version__ = '2.1'
    extra_branches = ['peaks.n_contributing_channels', 'peaks.center_time',
                      'peaks.area_per_channel[260]',
                      'peaks.s2_saturation_correction',
                      'interactions.s2_lifetime_correction',
                      'peaks.area_fraction_top', 'peaks.area',
                      'peaks.reconstructed_positions*',
                      'interactions.x', 'interactions.y', 'interactions.z',
                      'interactions.r_correction', 'interactions.z_correction',
                      'interactions.drift_time', 'start_time',
                      'peaks.hits*', 'interactions.s1_pattern_fit']

    def __init__(self):
        hax.minitrees.TreeMaker.__init__(self)
        self.extra_metadata = hax.config['corrections_definitions']
        self.corrections_handler = CorrectionsHandler()
        self.tfnn_posrec = tfnn_position_reconstruction()

    def extract_data(self, event):
        result = dict()

        # If there are no interactions, we cannot do anything
        if not len(event.interactions):
            return result

        # Shortcuts for pax classes
        peaks = event.peaks
        interactions = event.interactions

        # Select interactions for a DoubleScatter event:
        # assume one scatter is interactions[0]
        int_0 = 0
        s1_0 = interactions[int_0].s1
        s2_0 = interactions[int_0].s2

        # Find another scatter
        otherInts = [0, 0]
        for i, interaction in enumerate(interactions):
            if interaction.s1 != s1_0 and interaction.s2 == s2_0 and otherInts[0] == 0:
                otherInts[0] = i
            elif interaction.s1 != s1_0 and interaction.s2 != s2_0 and otherInts[1] == 0:
                otherInts[1] = i

        # Distinction between single and double S2 scatters:
        # cut events without a second S1
        if otherInts[1] != 0:
            s1_1 = interactions[otherInts[1]].s1
            s2_1 = interactions[otherInts[1]].s2
            int_1 = otherInts[1]
            ds_second_s2 = 1
        elif otherInts[0] != 0:
            s1_1 = interactions[otherInts[0]].s1
            s2_1 = interactions[otherInts[0]].s2
            int_1 = otherInts[0]
            ds_second_s2 = 0
        else:
            return dict()

        # Order s1s/interactions by time
        if peaks[s1_0].center_time <= peaks[s1_1].center_time:
            s1_a = s1_0
            s1_b = s1_1
            s2_a = s2_0
            s2_b = s2_1
            int_a = int_0
            int_b = int_1
        else:
            s1_a = s1_1
            s1_b = s1_0
            s2_a = s2_1
            s2_b = s2_0
            int_a = int_1
            int_b = int_0

        # Additional s1s and s2s removed! See v0.1.0
        result['s1_a'] = peaks[s1_a].area
        result['s1_a_center_time'] = peaks[s1_a].center_time
        result['s1_a_area_fraction_top'] = peaks[s1_a].area_fraction_top
        result['s1_a_range_50p_area'] = peaks[s1_a].range_area_decile[5]
        result['s2_a'] = peaks[s2_a].area
        result['s2_a_center_time'] = peaks[s2_a].center_time
        result['s2_a_bottom'] = (1.0 - peaks[s2_a].area_fraction_top) * peaks[s2_a].area
        result['s2_a_area_fraction_top'] = peaks[s2_a].area_fraction_top
        result['s2_a_range_50p_area'] = peaks[s2_a].range_area_decile[5]

        result['s1_b'] = peaks[s1_b].area
        result['s1_b_center_time'] = peaks[s1_b].center_time
        result['s1_b_area_fraction_top'] = peaks[s1_b].area_fraction_top
        result['s1_b_range_50p_area'] = peaks[s1_b].range_area_decile[5]
        result['s2_b'] = peaks[s2_b].area
        result['s2_b_center_time'] = peaks[s2_b].center_time
        result['s2_b_bottom'] = (1.0 - peaks[s2_b].area_fraction_top) * peaks[s2_b].area
        result['s2_b_area_fraction_top'] = peaks[s2_b].area_fraction_top
        result['s2_b_range_50p_area'] = peaks[s2_b].range_area_decile[5]
        result['ds_second_s2'] = ds_second_s2

        # Drift time
        result['int_a_drift_time'] = result['s2_a_center_time'] - result['s1_a_center_time']
        result['int_b_drift_time'] = result['s2_b_center_time'] - result['s1_b_center_time']

        # Compute DoubleScatter-specific variables:
        # select the largest hit on each channel in the s1_a and s1_b peaks
        s1_a_hitChannels = []
        s1_a_hitAreas = []
        for hit in peaks[s1_a].hits:
            if hit.is_rejected:
                continue
            if hit.channel not in s1_a_hitChannels:
                s1_a_hitChannels.append(hit.channel)
                s1_a_hitAreas.append(hit.area)
            else:
                hitChannel_i = s1_a_hitChannels.index(hit.channel)
                if hit.area > s1_a_hitAreas[hitChannel_i]:
                    s1_a_hitAreas[hitChannel_i] = hit.area

        s1_b_hitChannels = []
        s1_b_hitAreas = []
        for hit in peaks[s1_b].hits:
            if hit.is_rejected:
                continue
            if hit.channel not in s1_b_hitChannels:
                s1_b_hitChannels.append(hit.channel)
                s1_b_hitAreas.append(hit.area)
            else:
                hitChannel_i = s1_b_hitChannels.index(hit.channel)
                if hit.area > s1_b_hitAreas[hitChannel_i]:
                    s1_b_hitAreas[hitChannel_i] = hit.area

        # Count largest-hit channels in s1_b distinct from s1_a
        ds_s1_b_n_distinct_channels = 0
        for i, channel in enumerate(s1_b_hitChannels):
            if channel not in s1_a_hitChannels:
                ds_s1_b_n_distinct_channels += 1
        result['ds_s1_b_n_distinct_channels'] = ds_s1_b_n_distinct_channels
        result['ds_s1_dt'] = peaks[s1_b].center_time - peaks[s1_a].center_time

        # Need the observed ('uncorrected') position.
        # pax Interaction positions are corrected, so look up the
        # uncorrected positions in the ReconstructedPosition objects
        for rp in peaks[s2_a].reconstructed_positions:
            if rp.algorithm == 'PosRecNeuralNet':
                result['int_a_x_observed_nn'] = rp.x
                result['int_a_y_observed_nn'] = rp.y
                result['int_a_r_observed_nn'] = np.sqrt(rp.x**2 + rp.y**2)
            elif rp.algorithm == 'PosRecTopPatternFit':
                result['int_a_x_observed_tpf'] = rp.x
                result['int_a_y_observed_tpf'] = rp.y
                result['int_a_r_observed_tpf'] = np.sqrt(rp.x**2 + rp.y**2)
        for rp in peaks[s2_b].reconstructed_positions:
            if rp.algorithm == 'PosRecNeuralNet':
                result['int_b_x_observed_nn'] = rp.x
                result['int_b_y_observed_nn'] = rp.y
                result['int_b_r_observed_nn'] = np.sqrt(rp.x**2 + rp.y**2)
            elif rp.algorithm == 'PosRecTopPatternFit':
                result['int_b_x_observed_tpf'] = rp.x
                result['int_b_y_observed_tpf'] = rp.y
                result['int_b_r_observed_tpf'] = np.sqrt(rp.x**2 + rp.y**2)

        int_a_z = interactions[int_a].z - interactions[int_a].z_correction
        result['int_a_z_observed'] = int_a_z
        int_b_z = interactions[int_b].z - interactions[int_b].z_correction
        result['int_b_z_observed'] = int_b_z

        int_signal = ['int_a_', 'int_b_']
        int_s2 = {'int_a_': s2_a, 'int_b_': s2_b}
        for int_s in int_signal:
            # Position reconstruction based on the NN from TensorFlow.
            # First check for MC data, and avoid TensorFlow if MC.
            if not self.mc_data:  # Temporary for OSG production
                # Calculate the TF_NN reconstructed position from this
                # interaction's own S2 (s2_a for int_a, s2_b for int_b)
                predicted_xy_tensorflow = self.tfnn_posrec(
                    list(peaks[int_s2[int_s]].area_per_channel), self.run_number)

                result[int_s + 'x_observed_nn_tf'] = predicted_xy_tensorflow[0, 0] / 10.
                result[int_s + 'y_observed_nn_tf'] = predicted_xy_tensorflow[0, 1] / 10.
                result[int_s + 'r_observed_nn_tf'] = \
                    np.sqrt(result[int_s + 'x_observed_nn_tf']**2 +
                            result[int_s + 'y_observed_nn_tf']**2)

                # 3D FDC for the NN_TF position
                algo = 'nn_tf'
                cvals = [result[int_s + 'x_observed_' + algo],
                         result[int_s + 'y_observed_' + algo],
                         result[int_s + 'z_observed']]
                result[int_s + 'r_correction_3d_' + algo] = \
                    self.corrections_handler.get_correction_from_map(
                        "fdc_3d_tfnn", self.run_number, cvals)
                result[int_s + 'r_3d_' + algo] = (result[int_s + 'r_observed_' + algo] +
                                                  result[int_s + 'r_correction_3d_' + algo])
                result[int_s + 'x_3d_' + algo] = (result[int_s + 'x_observed_' + algo] *
                                                  (result[int_s + 'r_3d_' + algo] /
                                                   result[int_s + 'r_observed_' + algo]))
                result[int_s + 'y_3d_' + algo] = (result[int_s + 'y_observed_' + algo] *
                                                  (result[int_s + 'r_3d_' + algo] /
                                                   result[int_s + 'r_observed_' + algo]))
                if abs(result[int_s + 'z_observed']) > abs(result[int_s + 'r_correction_3d_' + algo]):
                    result[int_s + 'z_3d_' + algo] = \
                        -np.sqrt(result[int_s + 'z_observed']**2 -
                                 result[int_s + 'r_correction_3d_' + algo]**2)
                else:
                    result[int_s + 'z_3d_' + algo] = result[int_s + 'z_observed']
                result[int_s + 'z_correction_3d_' + algo] = \
                    result[int_s + 'z_3d_' + algo] - result[int_s + 'z_observed']

            # Apply the 3D data-driven FDC for the NN and TPF positions
            for algo in ['nn', 'tpf']:
                cvals = [result[int_s + 'x_observed_' + algo],
                         result[int_s + 'y_observed_' + algo],
                         result[int_s + 'z_observed']]
                result[int_s + 'r_correction_3d_' + algo] = \
                    self.corrections_handler.get_correction_from_map(
                        "fdc_3d", self.run_number, cvals)
                result[int_s + 'r_3d_' + algo] = \
                    result[int_s + 'r_observed_' + algo] + result[int_s + 'r_correction_3d_' + algo]
                result[int_s + 'x_3d_' + algo] = result[int_s + 'x_observed_' + algo] \
                    * (result[int_s + 'r_3d_' + algo] / result[int_s + 'r_observed_' + algo])
                result[int_s + 'y_3d_' + algo] = result[int_s + 'y_observed_' + algo] \
                    * (result[int_s + 'r_3d_' + algo] / result[int_s + 'r_observed_' + algo])
                if abs(result[int_s + 'z_observed']) > abs(result[int_s + 'r_correction_3d_' + algo]):
                    result[int_s + 'z_3d_' + algo] = \
                        -np.sqrt(result[int_s + 'z_observed']**2 -
                                 result[int_s + 'r_correction_3d_' + algo]**2)
                else:
                    result[int_s + 'z_3d_' + algo] = result[int_s + 'z_observed']
                result[int_s + 'z_correction_3d_' + algo] = \
                    result[int_s + 'z_3d_' + algo] - result[int_s + 'z_observed']

        # Include the electron lifetime correction
        result['s2_lifetime_correction'] = (
            self.corrections_handler.get_electron_lifetime_correction(
                self.run_number, self.run_start, result['int_a_drift_time'], self.mc_data))

        # Position-dependent corrections use int_a only
        int_s_default = 'int_a_'
        for algo in ['nn_tf', 'nn', 'tpf']:
            # Correct S2
            result[int_s_default + 'r_observed_' + algo] = np.sqrt(
                result[int_s_default + 'x_observed_' + algo]**2 +
                result[int_s_default + 'y_observed_' + algo]**2)
            cvals = [result[int_s_default + 'x_observed_' + algo],
                     result[int_s_default + 'y_observed_' + algo]]
            result[int_s_default + 's2_xy_correction_tot_' + algo] = \
                (1.0 / self.corrections_handler.get_correction_from_map(
                    "s2_xy_map", self.run_number, cvals))
            result[int_s_default + 's2_xy_correction_top_' + algo] = \
                (1.0 / self.corrections_handler.get_correction_from_map(
                    "s2_xy_map", self.run_number, cvals, map_name='map_top'))
            result[int_s_default + 's2_xy_correction_bottom_' + algo] = \
                (1.0 / self.corrections_handler.get_correction_from_map(
                    "s2_xy_map", self.run_number, cvals, map_name='map_bottom'))

            # Combine all the S2 corrections
            result['cs2_a_' + algo] = peaks[s2_a].area * result['s2_lifetime_correction'] \
                * result[int_s_default + 's2_xy_correction_tot_' + algo]
            result['cs2_a_top_' + algo] = \
                peaks[s2_a].area * peaks[s2_a].area_fraction_top * \
                result['s2_lifetime_correction'] * result[int_s_default + 's2_xy_correction_top_' + algo]
            result['cs2_a_bottom_' + algo] = \
                peaks[s2_a].area * (1.0 - peaks[s2_a].area_fraction_top) * \
                result['s2_lifetime_correction'] * result[int_s_default + 's2_xy_correction_bottom_' + algo]

            # Correct S1_a
            cvals = [result[int_s_default + 'x_3d_' + algo],
                     result[int_s_default + 'y_3d_' + algo],
                     result[int_s_default + 'z_3d_' + algo]]

            # Old LCE (without field correction)
            result[int_s_default + 's1_xyz_correction_fdc_3d_' + algo] = \
                (1 / self.corrections_handler.get_correction_from_map(
                    "s1_lce_map_nn_fdc_3d", self.run_number, cvals))
            result['cs1_a_no_field_corr_' + algo] = peaks[s1_a].area * \
                result[int_s_default + 's1_xyz_correction_fdc_3d_' + algo]

            # Corrected LCE (including field effects)
            result[int_s_default + 's1_xyz_true_correction_fdc_3d_' + algo] = \
                (1 / self.corrections_handler.get_correction_from_map(
                    "s1_corrected_lce_map_nn_fdc_3d", self.run_number, cvals))
            result['cs1_a_' + algo] = peaks[s1_a].area * \
                result[int_s_default + 's1_xyz_true_correction_fdc_3d_' + algo]

            # Correct S1_b: S1_a and S1_b are close in time, so the same
            # int_a-based correction values apply
            result['cs1_b_no_field_corr_' + algo] = peaks[s1_b].area * \
                result[int_s_default + 's1_xyz_correction_fdc_3d_' + algo]
            result['cs1_b_' + algo] = peaks[s1_b].area * \
                result[int_s_default + 's1_xyz_true_correction_fdc_3d_' + algo]

        # Default cS1 and cS2 values
        default_algo = 'nn_tf'
        result['cs1_a'] = result['cs1_a_' + default_algo]
        result['cs1_b'] = result['cs1_b_' + default_algo]
        result['cs2_a'] = result['cs2_a_' + default_algo]
        result['cs2_a_top'] = result['cs2_a_top_' + default_algo]
        result['cs2_a_bottom'] = result['cs2_a_bottom_' + default_algo]

        return result
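

# A usage sketch for CorrectedDoubleS1Scatter, not part of the treemaker: it
# assumes an initialised hax session, and the 1.5e6 ns drift-time bound is a
# made-up placeholder, not an endorsed analysis cut.
def example_load_double_scatter(dataset):
    """Load CorrectedDoubleS1Scatter and apply a rough Kr83m-like preselection."""
    df = hax.minitrees.load(dataset, treemakers=['CorrectedDoubleS1Scatter'])
    # Keep events with positively ordered S1s and a physical drift time
    mask = (df['ds_s1_dt'] > 0) & (df['int_a_drift_time'] < 1.5e6)
    return df[mask]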