Example 1
    def __init__(self):

        hax.minitrees.TreeMaker.__init__(self)
        self.extra_metadata = hax.config['corrections_definitions']
        self.corrections_handler = CorrectionsHandler()

        # We need to pull some stuff from the pax config
        self.pax_config = load_configuration("XENON1T")
        self.tpc_channels = self.pax_config['DEFAULT']['channels_in_detector']['tpc']
        self.confused_s1_channels = []
        self.s1_statistic = (
            self.pax_config['BuildInteractions.BasicInteractionProperties']['s1_pattern_statistic']
        )
        qes = np.array(self.pax_config['DEFAULT']['quantum_efficiencies'])

        self.s1_pattern_fitter = PatternFitter(
            filename=utils.data_file_name(self.pax_config['WaveformSimulator']['s1_patterns_file']),
            zoom_factor=self.pax_config['WaveformSimulator'].get('s1_patterns_zoom_factor', 1),
            adjust_to_qe=qes[self.tpc_channels],
            default_errors=(self.pax_config['DEFAULT']['relative_qe_error'] +
                            self.pax_config['DEFAULT']['relative_gain_error'])
        )

        self.top_channels = self.pax_config['DEFAULT']['channels_top']
        self.ntop_pmts = len(self.top_channels)

        # Declare nn stuff
        self.tfnn_weights = None
        self.tfnn_model = None
        self.loaded_nn = None

        # Run doc
        self.loaded_run_doc = None
        self.run_doc = None
Example 2
# Imports assumed by this snippet; the module paths follow pax conventions
# and are an assumption, not part of the original fragment:
import numpy as np
from pax import utils
from pax.configuration import load_configuration
from pax.PatternFitter import PatternFitter


class SimpleModel:
    """Implements the simple forward model for the ABC project.

       The forward model used here is the most basic test case.
       It draws from the per-PMT S2 LCE maps to provide a
       hitpattern for a given (x, y). Assumes all top PMTs are live.
       The total number of detected photoelectrons must also be
       specified; it is constant by default.
    """
    def __init__(self, zoom_multiplier=1):
        # Get some settings from the XENON1T detector configuration
        config = load_configuration('XENON1T')

        # The per-PMT S2 LCE maps (and zoom factor which is a technical detail)
        lce_maps = config['WaveformSimulator']['s2_patterns_file']
        lce_map_zoom = config['WaveformSimulator']['s2_patterns_zoom_factor']

        # Simulate the right PMT response
        qes = np.array(config['DEFAULT']['quantum_efficiencies'])
        top_pmts = config['DEFAULT']['channels_top']
        errors = (config['DEFAULT']['relative_qe_error'] +
                  config['DEFAULT']['relative_gain_error'])

        # Set up the PatternFitter, which samples the LCE maps
        self.pf = PatternFitter(filename=utils.data_file_name(lce_maps),
                                zoom_factor=lce_map_zoom * zoom_multiplier,
                                adjust_to_qe=qes[top_pmts],
                                default_errors=errors)

    def __call__(self, x, y, n_obs=500, batch_size=1, random_state=None):
        """Returns a hitpattern of n_obs photo-electrons
           for given x, y position.
        """

        return n_obs * self.pf.expected_pattern((x, y))
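
A minimal usage sketch (not part of the original source; it assumes the class
above is importable and that pax's XENON1T map files are available locally):

# Hypothetical usage; the position and photon count are made-up values.
model = SimpleModel()
pattern = model(x=0.0, y=0.0, n_obs=500)   # expected PE per top PMT
# The entries should sum to roughly n_obs if the LCE map is normalized.
print(len(pattern), pattern.sum())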
Example 3
    def __init__(self, zoom_multiplier=1):
        # Get some settings from the XENON1T detector configuration
        config = load_configuration('XENON1T')

        # The per-PMT S2 LCE maps (and zoom factor which is a technical detail)
        lce_maps = config['WaveformSimulator']['s2_patterns_file']
        lce_map_zoom = config['WaveformSimulator']['s2_patterns_zoom_factor']

        # Simulate the right PMT response
        qes = np.array(config['DEFAULT']['quantum_efficiencies'])
        top_pmts = config['DEFAULT']['channels_top']
        errors = (config['DEFAULT']['relative_qe_error'] +
                  config['DEFAULT']['relative_gain_error'])

        # Set up the PatternFitter, which samples the LCE maps
        self.pf = PatternFitter(filename=utils.data_file_name(lce_maps),
                                zoom_factor=lce_map_zoom * zoom_multiplier,
                                adjust_to_qe=qes[top_pmts],
                                default_errors=errors)
Example 4
    def startup(self):
        # Call original startup function
        PosRecTopPatternFit.startup(self)

        # Get the Fax config
        c = self.processor.simulator.config
        qes = np.array(c['quantum_efficiencies'])

        # Change the pattern fitter instance so it uses TPFF
        self.pf = PatternFitter(
            filename=utils.data_file_name(c['s2_fitted_patterns_file']),
            zoom_factor=c.get('s2_fitted_patterns_zoom_factor', 1),
            adjust_to_qe=qes[c['channels_top']],
            default_errors=c['relative_qe_error'] + c['relative_gain_error'])
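
The swapped-in fitter is queried like any other PatternFitter. Below is a
hypothetical call from inside this plugin, mirroring the compute_gof signature
used in Example 6; the position, hitpattern, and statistic name are
illustrative assumptions, not values from the original source:

# Hypothetical goodness-of-fit query against the fitted S2 patterns.
# 127 is the size of the XENON1T top array; the hitpattern is made up.
fake_hitpattern = np.random.exponential(10., size=127)
gof = self.pf.compute_gof((0., 0.), fake_hitpattern,
                          pmt_selection=np.ones(127, dtype=bool),
                          statistic='chi2gamma')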
Example 5
    def __init__(self, config_to_init):
        c = self.config = config_to_init

        # Should we repeat events?
        if 'event_repetitions' not in c:
            c['event_repetitions'] = 1

        # Primary excimer fraction from Nest Version 098
        # See G4S1Light.cc line 298
        density = c['liquid_density'] / (units.g / units.cm**3)
        excfrac = 0.4 - 0.11131 * density - 0.0026651 * density**2  # primary / secondary excimers
        excfrac = 1 / (1 + excfrac)  # primary / all excimers
        # primary / all excimers that produce a photon:
        excfrac /= 1 - (1 - excfrac) * (1 - c['s1_ER_recombination_fraction'])
        c['s1_ER_primary_excimer_fraction'] = excfrac
        log.debug('Inferred s1_ER_primary_excimer_fraction %s' % excfrac)

        # Recombination time from NEST 2014
        # The 3.5 seems fishy: they fit an exponential to data, but in the code they use a non-exponential distribution...
        efield = (c['drift_field'] / (units.V / units.cm))
        c['s1_ER_recombination_time'] = 3.5 / 0.18 * (
            1 / 20 + 0.41) * math.exp(-0.009 * efield)
        log.debug('Inferred s1_ER_recombination_time %s ns' %
                  c['s1_ER_recombination_time'])

        # Which channels stand to receive any photons?
        channels_for_photons = c['channels_in_detector']['tpc']
        if c['pmt_0_is_fake']:
            channels_for_photons = [
                ch for ch in channels_for_photons if ch != 0
            ]
        if c.get('magically_avoid_dead_pmts', False):
            channels_for_photons = [
                ch for ch in channels_for_photons if c['gains'][ch] > 0
            ]
        if c.get('magically_avoid_s1_excluded_pmts', False) and \
           'channels_excluded_for_s1' in c:
            channels_for_photons = [
                ch for ch in channels_for_photons
                if ch not in c['channels_excluded_for_s1']
            ]
        c['channels_for_photons'] = channels_for_photons

        # Determine sensible length of a pmt pulse to simulate
        dt = c['sample_duration']
        if c['pe_pulse_model'] == 'exponential':
            c['samples_before_pulse_center'] = math.ceil(
                c['pulse_width_cutoff'] * c['pmt_rise_time'] / dt)
            c['samples_after_pulse_center'] = math.ceil(
                c['pulse_width_cutoff'] * c['pmt_fall_time'] / dt)
        else:
            # Build the custom PMT pulse model
            ts = np.array(c['pe_pulse_ts'])
            ys = np.array(c['pe_pulse_ys'])

            # Integrate and normalize it
            # Note we're storing the integrated pulse, while the user gives the regular pulse.
            c['pe_pulse_function'] = interp1d(ts,
                                              np.cumsum(ys) / np.sum(ys),
                                              bounds_error=False,
                                              fill_value=(0, 1))

        log.debug(
            'Simulating %s samples before and %s samples after PMT pulse centers.'
            % (c['samples_before_pulse_center'],
               c['samples_after_pulse_center']))

        # Load real noise data from file, if requested
        if c['real_noise_file']:
            self.noise_data = np.load(
                utils.data_file_name(c['real_noise_file']))['arr_0']
            # The silly XENON100 PMT offset again: it's relevant for indexing the array of noise data
            # (which is one row per channel)
            self.channel_offset = 1 if c['pmt_0_is_fake'] else 0

        # Load light yields
        self.s1_light_yield_map = InterpolatingMap(
            utils.data_file_name(c['s1_light_yield_map']))
        self.s2_light_yield_map = InterpolatingMap(
            utils.data_file_name(c['s2_light_yield_map']))

        # Load transverse field (r,z) distortion map
        if c.get('rz_position_distortion_map'):
            self.rz_position_distortion_map = InterpolatingMap(
                utils.data_file_name(c['rz_position_distortion_map']))
        else:
            self.rz_position_distortion_map = None

        # Init s2 per pmt lce map
        qes = np.array(c['quantum_efficiencies'])
        if c.get('s2_patterns_file', None) is not None:
            self.s2_patterns = PatternFitter(
                filename=utils.data_file_name(c['s2_patterns_file']),
                zoom_factor=c.get('s2_patterns_zoom_factor', 1),
                adjust_to_qe=qes[c['channels_top']],
                default_errors=c['relative_qe_error'] +
                c['relative_gain_error'])
        else:
            self.s2_patterns = None

        ##
        # Load pdf for single photoelectron, if available
        ##
        if c.get('photon_area_distribution'):
            # Extract the spe pdf from a csv file into a pandas dataframe
            spe_shapes = pd.read_csv(
                utils.data_file_name(c['photon_area_distribution']))

            # Create a converter array from uniform random numbers to SPE gains (one interpolator per channel)
            # Scale the distributions so that they have an SPE mean of 1 and then calculate the cdf
            # We set the distributions of the off channels explicitly to 0 as a
            # precaution; as of now these channels are:
            # 1, 2, 12, 26, 34, 62, 65, 79, 86, 88, 102, 118, 130, 134, 135, 139,
            # 148, 150, 152, 162, 178, 183, 190, 198, 206, 213, 214, 234, 239, 244

            uniform_to_pe_arr = []
            # Skip the first column, which is the 'charge' header
            for ch in spe_shapes.columns[1:]:
                if spe_shapes[ch].sum() > 0:
                    mean_spe = (spe_shapes['charge'] *
                                spe_shapes[ch]).sum() / spe_shapes[ch].sum()
                    scaled_bins = spe_shapes['charge'] / mean_spe
                    cdf = np.cumsum(spe_shapes[ch]) / np.sum(spe_shapes[ch])
                else:
                    # if sum is 0, just make some dummy axes to pass to interpolator
                    cdf = np.linspace(0, 1, 10)
                    scaled_bins = np.zeros_like(cdf)

                uniform_to_pe_arr.append(interp1d(cdf, scaled_bins))
            if uniform_to_pe_arr:
                self.uniform_to_pe_arr = np.array(uniform_to_pe_arr)
            else:
                self.uniform_to_pe_arr = None

        else:
            self.uniform_to_pe_arr = None

        # Init s1 pattern maps
        # We're assuming the map is MC-derived, so we adjust for QE (just like for the S2 maps)
        log.debug("Initializing s1 patterns...")
        if c.get('s1_patterns_file', None) is not None:
            self.s1_patterns = PatternFitter(
                filename=utils.data_file_name(c['s1_patterns_file']),
                zoom_factor=c.get('s1_patterns_zoom_factor', 1),
                adjust_to_qe=qes[c['channels_in_detector']['tpc']],
                default_errors=c['relative_qe_error'] +
                c['relative_gain_error'])
        else:
            self.s1_patterns = None

        ##
        # Luminescence time distribution precomputation
        ##

        # For which gas gaps do we have to compute the luminescence time distribution?
        gas_gap_warping_map = c.get('gas_gap_warping_map', None)
        base_dg = c['elr_gas_gap_length']
        if gas_gap_warping_map is not None:
            with open(utils.data_file_name(gas_gap_warping_map),
                      mode='rb') as infile:
                mh = pickle.load(infile)
            self.gas_gap_length = lambda x, y: base_dg + mh.lookup([x], [y]).item()
            self.luminescence_converters_dgs = np.linspace(
                mh.histogram.min(), mh.histogram.max(),
                c.get('n_luminescence_time_converters', 20)) + base_dg
        else:
            self.gas_gap_length = lambda x, y: base_dg
            self.luminescence_converters_dgs = np.array([base_dg])

        self.luminescence_converters = []

        # Calculate particle number density in the gas (ideal gas law)
        number_density_gas = c['pressure'] / (units.boltzmannConstant *
                                              c['temperature'])

        # Slope of the drift velocity vs field relation
        alpha = c['gas_drift_velocity_slope'] / number_density_gas

        @np.vectorize
        def yield_per_dr(E):
            # Gives something proportional to the yield, not the yield itself!
            y = E / (units.kV / units.cm) - 0.8 * c['pressure'] / units.bar
            return max(y, 0)

        rA = c['anode_field_domination_distance']
        rW = c['anode_wire_radius']

        for dg in self.luminescence_converters_dgs:
            dl = c['gate_to_anode_distance'] - dg
            rL = dg

            # Voltage over the gas gap; from eq. 1 in the SE note (times dg)
            V = c['anode_voltage'] / (1 + dl / dg / c['lxe_dielectric_constant'])

            # Field in the gas gap. r is distance from anode center: start at r=rL
            E0 = V / (rL - rA + rA * (np.log(rA) - np.log(rW)))

            @np.vectorize
            def Er(r):
                if r < rW:
                    return 0
                elif rW <= r < rA:
                    return E0 * rA / r
                else:
                    return E0

            # Small numeric calculation to get emission time cdf
            R = np.linspace(rL, rW, 1000)
            E = Er(R)
            RDOT = alpha * E
            T = np.cumsum(-np.diff(R)[0] / RDOT)  # dt = dx / v
            yield_density = yield_per_dr(E) * RDOT  # density/dt = density/dx * dx/dt
            yield_density /= yield_density.sum()

            # Invert CDF using interpolator
            uniform_to_emission_time = interp1d(np.cumsum(yield_density),
                                                T,
                                                fill_value=0,
                                                bounds_error=False)

            self.luminescence_converters.append(uniform_to_emission_time)

        self.clear_signals_queue()
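
Both the SPE-gain converters and the luminescence-time converters above use
the same inverse-CDF trick: tabulate a CDF once, wrap it in interp1d, and feed
it uniform random numbers. A self-contained sketch of that technique (an
illustration with a made-up pdf, not code from the original source):

# Inverse-CDF sampling, the pattern used above for SPE gains and emission times.
import numpy as np
from scipy.interpolate import interp1d

values = np.linspace(0., 5., 200)    # support of the example distribution
pdf = np.exp(-values)                # un-normalized example pdf
cdf = np.cumsum(pdf) / np.sum(pdf)   # tabulated CDF
uniform_to_value = interp1d(cdf, values, bounds_error=False,
                            fill_value=(values[0], values[-1]))

samples = uniform_to_value(np.random.rand(10000))  # distributed per the pdf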
Example 6
class PositionReconstruction(TreeMaker):
    """Stores position-reconstruction-related variables.

    Provides:
       - s1_pattern_fit_hax: S1 pattern likelihood computed with corrected
                             position and areas
       - s1_pattern_fit_hits_hax: S1 pattern likelihood computed with corrected
                                  position and hits
       - s1_pattern_fit_bottom_hax: S1 pattern likelihood computed with corrected
                                    position and bottom array area
       - s1_pattern_fit_bottom_hits_hax: S1 pattern likelihood computed with corrected
                                         position and bottom array hits

       - s1_area_fraction_top_probability_hax: S1 AFT p-value computed with corrected position
       - s1_area_fraction_top_probability_nothresh: computed using area below S1=10 (instead of hits)
       - s1_area_fraction_top_binomial: Binomial probability for given S1 AFT
       - s1_area_fraction_top_binomial_nothresh: Same except using area below S1=10

       - x_observed_nn_tf: TensorFlow NN reconstructed x position
       - y_observed_nn_tf: TensorFlow NN reconstructed y position
       - r_observed_nn_tf: TensorFlow NN reconstructed r position

       - r_3d_nn_tf: the corrected interaction r coordinate (data-driven 3d fdc)
       - x_3d_nn_tf: the corrected interaction x coordinate (data-driven 3d fdc)
       - y_3d_nn_tf: the corrected interaction y coordinate (data-driven 3d fdc)
       - z_3d_nn_tf: the corrected interaction z coordinate (data-driven 3d fdc)
       - r_correction_3d_nn_tf: r_3d_nn_tf - r_observed_nn_tf
       - z_correction_3d_nn_tf: z_3d_nn_tf - z_observed

       - s1_area_upper_injection_fraction: s1 area fraction near Rn220 injection points (near PMT 131)
       - s1_area_lower_injection_fraction: s1 area fraction near Rn220 injection points (near PMT 243)

       - s2_pattern_fit_nn: s2 pattern fit using nn position
    """
    __version__ = '1.1'
    extra_branches = ['peaks.area_per_channel[260]',
                      'peaks.hits_per_channel[260]',
                      'peaks.n_saturated_per_channel[260]',
                      'peaks.n_hits', 'peaks.hits_fraction_top', 'peaks.reconstructed_positions',
                      'interactions.x', 'interactions.y', 'interactions.z']

    def __init__(self):

        hax.minitrees.TreeMaker.__init__(self)
        self.extra_metadata = hax.config['corrections_definitions']
        self.corrections_handler = CorrectionsHandler()

        # We need to pull some stuff from the pax config
        self.pax_config = load_configuration("XENON1T")
        self.tpc_channels = self.pax_config['DEFAULT']['channels_in_detector']['tpc']
        self.confused_s1_channels = []
        self.s1_statistic = (
            self.pax_config['BuildInteractions.BasicInteractionProperties']['s1_pattern_statistic']
        )
        qes = np.array(self.pax_config['DEFAULT']['quantum_efficiencies'])

        self.s1_pattern_fitter = PatternFitter(
            filename=utils.data_file_name(self.pax_config['WaveformSimulator']['s1_patterns_file']),
            zoom_factor=self.pax_config['WaveformSimulator'].get('s1_patterns_zoom_factor', 1),
            adjust_to_qe=qes[self.tpc_channels],
            default_errors=(self.pax_config['DEFAULT']['relative_qe_error'] +
                            self.pax_config['DEFAULT']['relative_gain_error'])
        )

        self.top_channels = self.pax_config['DEFAULT']['channels_top']
        self.ntop_pmts = len(self.top_channels)

        # Declare nn stuff
        self.tfnn_weights = None
        self.tfnn_model = None
        self.loaded_nn = None

        # Run doc
        self.loaded_run_doc = None
        self.run_doc = None

    def load_nn(self):
        """For loading NN files"""
        from keras.models import model_from_json

        # If we already loaded it up then skip
        if ((self.tfnn_weights == self.corrections_handler.get_misc_correction(
                "tfnn_weights", self.run_number)) and
            (self.tfnn_model == self.corrections_handler.get_misc_correction(
                "tfnn_model", self.run_number))):
            return

        self.tfnn_weights = self.corrections_handler.get_misc_correction(
            "tfnn_weights", self.run_number)
        self.tfnn_model = self.corrections_handler.get_misc_correction(
            "tfnn_model", self.run_number)

        with open(utils.data_file_name(self.tfnn_model), 'r') as json_file_nn:
            loaded_model_json = json_file_nn.read()
        self.loaded_nn = model_from_json(loaded_model_json)

        # Get the bad-PMT list from the same JSON file
        self.list_bad_pmts = json.loads(loaded_model_json)['badPMTList']

        weights_file = utils.data_file_name(self.tfnn_weights)
        self.loaded_nn.load_weights(weights_file)

    def get_data(self, dataset, event_list=None):
        # If we do switch to new NN later get rid of this stuff and directly use those positions!
        data, _ = hax.minitrees.load_single_dataset(dataset, ['Corrections', 'Fundamentals'])
        self.x = data.x_3d_nn.values
        self.y = data.y_3d_nn.values
        self.z = data.z_3d_nn.values

        self.indices = list(data.event_number.values)

        return hax.minitrees.TreeMaker.get_data(self, dataset, event_list)

    def load_run_doc(self, run):
        if run != self.loaded_run_doc:
            self.run_doc = get_run_info(run)
            self.loaded_run_doc = run

    def extract_data(self, event):

        event_data = {
            "s1_pattern_fit_hax": None,
            "s1_pattern_fit_hits_hax": None,
            "s1_pattern_fit_bottom_hax": None,
            "s1_pattern_fit_bottom_hits_hax": None,
            "s2_pattern_fit_nn": None,
            "s2_pattern_fit_tpf": None,
            "s1_area_fraction_top_probability_hax": None,
            "s1_area_fraction_top_probability_nothresh": None,
            "s1_area_fraction_top_binomial": None,
            "s1_area_fraction_top_binomial_nothresh": None,
            "x_observed_nn_tf": None,
            "y_observed_nn_tf": None,
            "r_observed_nn_tf": None,
            "x_3d_nn_tf": None,
            "y_3d_nn_tf": None,
            "r_3d_nn_tf": None,
            "z_3d_nn_tf": None,
            "r_correction_3d_nn_tf": None,
            "z_correction_3d_nn_tf": None,
            "s1_area_upper_injection_fraction": None,
            "s1_area_lower_injection_fraction": None,
            "s2_pattern_fit_nn": None,
            "s2_pattern_fit_tpf": None
        }

        # We first need the positions. This minitree is only valid when loading
        # Corrections, since that is what provides the corrected positions.
        if not len(event.interactions):
            return event_data

        event_num = event.event_number

        try:
            event_index = self.indices.index(event_num)
        except ValueError:
            # Event not present in the loaded Corrections data
            return event_data

        interaction = event.interactions[0]
        s1 = event.peaks[interaction.s1]
        s2 = event.peaks[interaction.s2]

        for rp in s2.reconstructed_positions:
            if rp.algorithm == "PosRecNeuralNet":
                event_data['s2_pattern_fit_nn'] = rp.goodness_of_fit
            elif rp.algorithm == "PosRecTopPatternFit":
                event_data['s2_pattern_fit_tpf'] = rp.goodness_of_fit

        # Position reconstruction based on NN from TensorFlow
        # First check for MC data, and avoid TensorFlow if MC.
        if not self.mc_data:  # Temporary for OSG production
            # Check that correct NN is loaded and change if not
            self.load_nn()

            s2apc = np.array(list(s2.area_per_channel))
            s2apc_clean = []

            for ipmt, s2_t in enumerate(s2apc):
                if ipmt not in self.list_bad_pmts and ipmt < self.ntop_pmts:
                    s2apc_clean.append(s2_t)

            s2apc_clean = np.asarray(s2apc_clean)
            s2apc_clean_norm = s2apc_clean / s2apc_clean.sum()
            s2apc_clean_norm = s2apc_clean_norm.reshape(1, len(s2apc_clean_norm))

            predicted_xy_tensorflow = self.loaded_nn.predict(s2apc_clean_norm)
            event_data['x_observed_nn_tf'] = predicted_xy_tensorflow[0, 0] / 10.
            event_data['y_observed_nn_tf'] = predicted_xy_tensorflow[0, 1] / 10.
            event_data['r_observed_nn_tf'] = np.sqrt(
                event_data['x_observed_nn_tf']**2 + event_data['y_observed_nn_tf']**2)

            # 3D FDC
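            # (Added note, inferred from the code below: the 3D FDC map returns a
            # radial shift dr for the observed (x, y, z); r is pushed outward by
            # dr, x and y are rescaled by r_3d / r_observed, and z is recomputed
            # as -sqrt(z_observed**2 - dr**2), preserving the drift path length.)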
            algo = 'nn_tf'
            z_observed = interaction.z - interaction.z_correction
            cvals = [event_data['x_observed_' + algo], event_data['y_observed_' + algo], z_observed]
            event_data['r_correction_3d_' + algo] = self.corrections_handler.get_correction_from_map(
                "fdc_3d_tfnn", self.run_number, cvals)

            event_data['r_3d_' + algo] = (event_data['r_observed_' + algo] +
                                          event_data['r_correction_3d_' + algo])
            event_data['x_3d_' + algo] = (event_data['x_observed_' + algo] *
                                          (event_data['r_3d_' + algo] /
                                           event_data['r_observed_' + algo]))
            event_data['y_3d_' + algo] = (event_data['y_observed_' + algo] *
                                          (event_data['r_3d_' + algo] /
                                           event_data['r_observed_' + algo]))

            if abs(z_observed) > abs(event_data['r_correction_3d_' + algo]):
                event_data['z_3d_' + algo] = -np.sqrt(z_observed ** 2 -
                                                      event_data['r_correction_3d_' + algo] ** 2)
            else:
                event_data['z_3d_' + algo] = z_observed

            event_data['z_correction_3d_' + algo] = event_data['z_3d_' + algo] - z_observed

        # s1 area fraction near injection points for Rn220 source
        area_upper_injection = (s1.area_per_channel[131] + s1.area_per_channel[138] +
                                s1.area_per_channel[146] + s1.area_per_channel[147])
        area_lower_injection = (s1.area_per_channel[236] + s1.area_per_channel[237] +
                                s1.area_per_channel[243])

        event_data['s1_area_upper_injection_fraction'] = area_upper_injection / s1.area
        event_data['s1_area_lower_injection_fraction'] = area_lower_injection / s1.area

        # Want S1 AreaFractionTop Probability
        aft_prob = self.corrections_handler.get_correction_from_map(
            "s1_aft_map", self.run_number, [self.x[event_index], self.y[event_index], self.z[event_index]])

        aft_args = aft_prob, s1.area, s1.area_fraction_top, s1.n_hits, s1.hits_fraction_top

        event_data['s1_area_fraction_top_probability_hax'] = s1_area_fraction_top_probability(*aft_args)
        event_data['s1_area_fraction_top_binomial'] = s1_area_fraction_top_probability(*(aft_args + (10, 'pmf')))

        event_data['s1_area_fraction_top_probability_nothresh'] = s1_area_fraction_top_probability(*(aft_args + (0,)))
        event_data['s1_area_fraction_top_binomial_nothresh'] = s1_area_fraction_top_probability(*(aft_args + (0, 'pmf')))

        # Now do s1_pattern_fit
        apc = np.array(list(s1.area_per_channel))
        hpc = np.array(list(s1.hits_per_channel))

        # Get saturated channels
        confused_s1_channels = []
        self.load_run_doc(self.run_number)

        # The original s1 pattern calculation had a bug where dead PMTs were
        # included. They are not included here.
        for ch, gain in enumerate(self.run_doc['processor']['DEFAULT']['gains']):
            if gain == 0:
                confused_s1_channels.append(ch)
        for ch, n_sat in enumerate(s1.n_saturated_per_channel):
            if n_sat > 0:
                confused_s1_channels.append(ch)

        try:
            # Create PMT array of booleans for use in likelihood calculation
            is_pmt_in = np.ones(len(self.tpc_channels), dtype=bool)  # Default True
            is_pmt_in[confused_s1_channels] = False  # Ignore saturated channels

            event_data['s1_pattern_fit_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                apc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

            event_data['s1_pattern_fit_hits_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                hpc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

            # Switch to bottom PMTs only
            is_pmt_in[self.top_channels] = False

            event_data['s1_pattern_fit_bottom_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                apc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

            event_data['s1_pattern_fit_bottom_hits_hax'] = self.s1_pattern_fitter.compute_gof(
                (self.x[event_index], self.y[event_index], self.z[event_index]),
                hpc[self.tpc_channels],
                pmt_selection=is_pmt_in,
                statistic=self.s1_statistic)

        except exceptions.CoordinateOutOfRangeException:
            # pax does this too; happens when the event is outside the TPC (usually in z)
            return event_data

        return event_data
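
A hypothetical way to load this minitree through hax (the dataset name and
init options are made up; hax.minitrees.load is the usual entry point):

# Hypothetical usage; requires a configured hax installation.
import hax
hax.init(experiment='XENON1T')
df = hax.minitrees.load('170204_1410_07',
                        treemakers=['Corrections', 'PositionReconstruction'])
print(df[['x_3d_nn_tf', 'y_3d_nn_tf', 'z_3d_nn_tf']].head())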