Example #1
    def __init__(self):

        hax.minitrees.TreeMaker.__init__(self)
        self.extra_metadata = hax.config['corrections_definitions']
        self.corrections_handler = CorrectionsHandler()

        # Pull the settings we need from the pax configuration
        self.pax_config = load_configuration("XENON1T")
        self.tpc_channels = self.pax_config['DEFAULT']['channels_in_detector']['tpc']
        self.confused_s1_channels = []
        self.s1_statistic = (
            self.pax_config['BuildInteractions.BasicInteractionProperties']['s1_pattern_statistic']
        )
        qes = np.array(self.pax_config['DEFAULT']['quantum_efficiencies'])

        self.s1_pattern_fitter = PatternFitter(
            filename=utils.data_file_name(self.pax_config['WaveformSimulator']['s1_patterns_file']),
            zoom_factor=self.pax_config['WaveformSimulator'].get('s1_patterns_zoom_factor', 1),
            adjust_to_qe=qes[self.tpc_channels],
            default_errors=(self.pax_config['DEFAULT']['relative_qe_error'] +
                            self.pax_config['DEFAULT']['relative_gain_error'])
        )

        self.top_channels = self.pax_config['DEFAULT']['channels_top']
        self.ntop_pmts = len(self.top_channels)

        # Neural-net (TFNN) attributes; loaded lazily
        self.tfnn_weights = None
        self.tfnn_model = None
        self.loaded_nn = None

        # Run document cache
        self.loaded_run_doc = None
        self.run_doc = None
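
Usage note (not part of the example above): a hedged sketch of how the S1 pattern fitter built here might be queried for a goodness-of-fit. The compute_gof method and its keyword names are assumptions about pax's PatternFitter interface, and areas_per_channel, x, y, z are hypothetical event inputs.

    def s1_pattern_goodness_of_fit(self, areas_per_channel, x, y, z):
        # Sketch only: compute_gof() and its keywords are assumed, not shown above
        usable_channels = np.setdiff1d(self.tpc_channels, self.confused_s1_channels)
        return self.s1_pattern_fitter.compute_gof(
            (x, y, z),
            areas_per_channel[self.tpc_channels],
            pmt_selection=usable_channels,
            statistic=self.s1_statistic)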
Example #2
    def startup(self):
        # Call original startup function
        PosRecTopPatternFit.startup(self)

        # Get the Fax config
        c = self.processor.simulator.config
        qes = np.array(c['quantum_efficiencies'])

        # Change the pattern fitter instance so it uses TPFF
        self.pf = PatternFitter(
            filename=utils.data_file_name(c['s2_fitted_patterns_file']),
            zoom_factor=c.get('s2_fitted_patterns_zoom_factor', 1),
            adjust_to_qe=qes[c['channels_top']],
            default_errors=c['relative_qe_error'] + c['relative_gain_error'])
Example #3
    def __init__(self, zoom_multiplier=1):
        # Get some settings from the XENON1T detector configuration
        config = load_configuration('XENON1T')

        # The per-PMT S2 LCE maps (and the zoom factor, which is a technical detail)
        lce_maps = config['WaveformSimulator']['s2_patterns_file']
        lce_map_zoom = config['WaveformSimulator']['s2_patterns_zoom_factor']

        # Simulate the right PMT response
        qes = np.array(config['DEFAULT']['quantum_efficiencies'])
        top_pmts = config['DEFAULT']['channels_top']
        errors = (config['DEFAULT']['relative_qe_error'] +
                  config['DEFAULT']['relative_gain_error'])

        # Set up the PatternFitter, which samples the LCE maps
        self.pf = PatternFitter(filename=utils.data_file_name(lce_maps),
                                zoom_factor=lce_map_zoom * zoom_multiplier,
                                adjust_to_qe=qes[top_pmts],
                                default_errors=errors)
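
To show what a QE-adjusted LCE pattern is typically used for, here is a minimal standalone sketch of distributing simulated photons over the top PMTs. The pattern array is a random stand-in for the per-PMT fractions the loaded S2 map would provide at one (x, y) position; the PMT and photon counts are illustrative.

import numpy as np

rng = np.random.default_rng(42)
n_top_pmts = 127                        # illustrative number of top-array PMTs
pattern = rng.random(n_top_pmts)
pattern /= pattern.sum()                # per-PMT detection probabilities (sum to 1)
n_photons = 5000                        # photons reaching the top array
hits_per_pmt = rng.multinomial(n_photons, pattern)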
Example #4
    def __init__(self, config_to_init):
        c = self.config = config_to_init

        # Should we repeat events?
        if 'event_repetitions' not in c:
            c['event_repetitions'] = 1

        # Primary excimer fraction from NEST version 098
        # See G4S1Light.cc line 298
        density = c['liquid_density'] / (units.g / units.cm**3)
        excfrac = 0.4 - 0.11131 * density - 0.0026651 * density**2  # primary / secondary excimers
        excfrac = 1 / (1 + excfrac)  # primary / all excimers
        # primary / all excimers that produce a photon:
        excfrac /= 1 - (1 - excfrac) * (1 - c['s1_ER_recombination_fraction'])
        c['s1_ER_primary_excimer_fraction'] = excfrac
        log.debug('Inferred s1_ER_primary_excimer_fraction %s' % excfrac)
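        # Worked example with illustrative numbers (not the actual config values):
        # for density = 2.85 g/cm^3 and s1_ER_recombination_fraction = 0.6,
        # excfrac = 0.4 - 0.317 - 0.022 = 0.061 (primary/secondary excimers),
        # then 1 / (1 + 0.061) = 0.942 (primary/all excimers),
        # and 0.942 / (1 - 0.058 * 0.4) = 0.964 after the photon-producing correction.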

        # Recombination time from NEST 2014
        # The factor 3.5 seems fishy: NEST fits an exponential to the data, but their code uses a non-exponential distribution...
        efield = (c['drift_field'] / (units.V / units.cm))
        c['s1_ER_recombination_time'] = (
            3.5 / 0.18 * (1 / 20 + 0.41) * math.exp(-0.009 * efield))
        log.debug('Inferred s1_ER_recombination_time %s ns' %
                  c['s1_ER_recombination_time'])
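        # For example, at an illustrative drift field of 120 V/cm:
        # 3.5 / 0.18 * (1/20 + 0.41) * exp(-0.009 * 120) = 19.44 * 0.46 * 0.34 ~ 3.0 ns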

        # Which channels stand to receive any photons?
        channels_for_photons = c['channels_in_detector']['tpc']
        if c['pmt_0_is_fake']:
            channels_for_photons = [
                ch for ch in channels_for_photons if ch != 0
            ]
        if c.get('magically_avoid_dead_pmts', False):
            channels_for_photons = [
                ch for ch in channels_for_photons if c['gains'][ch] > 0
            ]
        if c.get('magically_avoid_s1_excluded_pmts', False) and \
           'channels_excluded_for_s1' in c:
            channels_for_photons = [
                ch for ch in channels_for_photons
                if ch not in c['channels_excluded_for_s1']
            ]
        c['channels_for_photons'] = channels_for_photons

        # Determine a sensible length of PMT pulse to simulate
        dt = c['sample_duration']
        if c['pe_pulse_model'] == 'exponential':
            c['samples_before_pulse_center'] = math.ceil(
                c['pulse_width_cutoff'] * c['pmt_rise_time'] / dt)
            c['samples_after_pulse_center'] = math.ceil(
                c['pulse_width_cutoff'] * c['pmt_fall_time'] / dt)
        else:
            # Build the custom PMT pulse model
            ts = np.array(c['pe_pulse_ts'])
            ys = np.array(c['pe_pulse_ys'])

            # Integrate and normalize it
            # Note we're storing the integrated pulse, while the user gives the regular pulse.
            c['pe_pulse_function'] = interp1d(ts,
                                              np.cumsum(ys) / np.sum(ys),
                                              bounds_error=False,
                                              fill_value=(0, 1))
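            # With bounds_error=False, fill_value=(0, 1) pins the integrated pulse
            # to 0 before the first sample time and to 1 after the last one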

        log.debug(
            'Simulating %s samples before and %s samples after PMT pulse centers.'
            % (c['samples_before_pulse_center'],
               c['samples_after_pulse_center']))

        # Load real noise data from file, if requested
        if c['real_noise_file']:
            self.noise_data = np.load(
                utils.data_file_name(c['real_noise_file']))['arr_0']
            # The silly XENON100 PMT offset again: it's relevant for indexing the array of noise data
            # (which is one row per channel)
            self.channel_offset = 1 if c['pmt_0_is_fake'] else 0

        # Load light yields
        self.s1_light_yield_map = InterpolatingMap(
            utils.data_file_name(c['s1_light_yield_map']))
        self.s2_light_yield_map = InterpolatingMap(
            utils.data_file_name(c['s2_light_yield_map']))

        # Load transverse field (r,z) distortion map
        if c.get('rz_position_distortion_map'):
            self.rz_position_distortion_map = InterpolatingMap(
                utils.data_file_name(c['rz_position_distortion_map']))
        else:
            self.rz_position_distortion_map = None

        # Initialize the per-PMT S2 LCE map
        qes = np.array(c['quantum_efficiencies'])
        if c.get('s2_patterns_file', None) is not None:
            self.s2_patterns = PatternFitter(
                filename=utils.data_file_name(c['s2_patterns_file']),
                zoom_factor=c.get('s2_patterns_zoom_factor', 1),
                adjust_to_qe=qes[c['channels_top']],
                default_errors=c['relative_qe_error'] +
                c['relative_gain_error'])
        else:
            self.s2_patterns = None

        ##
        # Load the single-photoelectron PDF, if available
        ##
        if c.get('photon_area_distribution'):
            # Extract the SPE PDF from a CSV file into a pandas DataFrame
            spe_shapes = pd.read_csv(
                utils.data_file_name(c['photon_area_distribution']))

            # Create a converter array from uniform random numbers to SPE gains (one interpolator per channel).
            # Scale each distribution so its SPE mean is 1, then compute the CDF.
            # The distributions of switched-off channels are set explicitly to 0 as a precaution;
            # at present these channels are:
            # 1, 2, 12, 26, 34, 62, 65, 79, 86, 88, 102, 118, 130, 134, 135, 139,
            # 148, 150, 152, 162, 178, 183, 190, 198, 206, 213, 214, 234, 239, 244

            uniform_to_pe_arr = []
            # Skip the first column, which is the 'charge' axis
            for ch in spe_shapes.columns[1:]:
                if spe_shapes[ch].sum() > 0:
                    mean_spe = (spe_shapes['charge'] *
                                spe_shapes[ch]).sum() / spe_shapes[ch].sum()
                    scaled_bins = spe_shapes['charge'] / mean_spe
                    cdf = np.cumsum(spe_shapes[ch]) / np.sum(spe_shapes[ch])
                else:
                    # if sum is 0, just make some dummy axes to pass to interpolator
                    cdf = np.linspace(0, 1, 10)
                    scaled_bins = np.zeros_like(cdf)

                uniform_to_pe_arr.append(interp1d(cdf, scaled_bins))
            if uniform_to_pe_arr:
                self.uniform_to_pe_arr = np.array(uniform_to_pe_arr)
            else:
                self.uniform_to_pe_arr = None

        else:
            self.uniform_to_pe_arr = None

        # Initialize S1 pattern maps
        # We're assuming the map is MC-derived, so we adjust for QE (just like for the S2 maps)
        log.debug("Initializing s1 patterns...")
        if c.get('s1_patterns_file', None) is not None:
            self.s1_patterns = PatternFitter(
                filename=utils.data_file_name(c['s1_patterns_file']),
                zoom_factor=c.get('s1_patterns_zoom_factor', 1),
                adjust_to_qe=qes[c['channels_in_detector']['tpc']],
                default_errors=c['relative_qe_error'] +
                c['relative_gain_error'])
        else:
            self.s1_patterns = None

        ##
        # Luminescence time distribution precomputation
        ##

        # For which gas gaps do we have to compute the luminescence time distribution?
        gas_gap_warping_map = c.get('gas_gap_warping_map', None)
        base_dg = c['elr_gas_gap_length']
        if gas_gap_warping_map is not None:
            with open(utils.data_file_name(gas_gap_warping_map),
                      mode='rb') as infile:
                mh = pickle.load(infile)
            self.gas_gap_length = lambda x, y: base_dg + mh.lookup([x], [y]).item()
            self.luminescence_converters_dgs = np.linspace(
                mh.histogram.min(), mh.histogram.max(),
                c.get('n_luminescence_time_converters', 20)) + base_dg
        else:
            self.gas_gap_length = lambda x, y: base_dg
            self.luminescence_converters_dgs = np.array([base_dg])

        self.luminescence_converters = []

        # Calculate particle number density in the gas (ideal gas law)
        number_density_gas = c['pressure'] / (units.boltzmannConstant *
                                              c['temperature'])

        # Slope of the drift velocity vs field relation
        alpha = c['gas_drift_velocity_slope'] / number_density_gas

        @np.vectorize
        def yield_per_dr(E):
            # Gives something proportional to the yield, not the yield itself!
            y = E / (units.kV / units.cm) - 0.8 * c['pressure'] / units.bar
            return max(y, 0)

        rA = c['anode_field_domination_distance']
        rW = c['anode_wire_radius']

        for dg in self.luminescence_converters_dgs:
            dl = c['gate_to_anode_distance'] - dg
            rL = dg

            # Voltage over the gas gap (from eq. 1 in the SE note, times dg)
            V = c['anode_voltage'] / (1 + dl / dg / c['lxe_dielectric_constant'])

            # Field in the gas gap. r is distance from anode center: start at r=rL
            E0 = V / (rL - rA + rA * (np.log(rA) - np.log(rW)))

            @np.vectorize
            def Er(r):
                if r < rW:
                    return 0
                elif rW <= r < rA:
                    return E0 * rA / r
                else:
                    return E0

            # Small numeric calculation to get the emission time CDF
            R = np.linspace(rL, rW, 1000)
            E = Er(R)
            RDOT = alpha * E
            T = np.cumsum(-np.diff(R)[0] / RDOT)  # dt = dx / v
            yield_density = yield_per_dr(E) * RDOT  # density/dt = density/dx * dx/dt
            yield_density /= yield_density.sum()

            # Invert CDF using interpolator
            uniform_to_emission_time = interp1d(np.cumsum(yield_density),
                                                T,
                                                fill_value=0,
                                                bounds_error=False)
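            # Evaluating this at u ~ Uniform(0, 1) yields an emission time sample
            # for this gas gap (inverse-transform sampling of the CDF above)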

            self.luminescence_converters.append(uniform_to_emission_time)

        self.clear_signals_queue()
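
The uniform-to-SPE-gain conversion in Example #4 is an inverse-CDF (inverse-transform) construction. Below is a minimal standalone sketch of the same trick, using a synthetic toy SPE charge spectrum in place of the photon_area_distribution file.

import numpy as np
from scipy.interpolate import interp1d

charge = np.linspace(0, 3, 300)                         # toy charge axis
spe_shape = np.exp(-0.5 * ((charge - 1) / 0.4) ** 2)    # synthetic toy SPE spectrum

mean_spe = (charge * spe_shape).sum() / spe_shape.sum()
scaled_bins = charge / mean_spe                         # rescale so the SPE mean is 1
cdf = np.cumsum(spe_shape) / np.sum(spe_shape)

# Map uniform random numbers through the inverted CDF to obtain SPE gains
uniform_to_pe = interp1d(cdf, scaled_bins, bounds_error=False,
                         fill_value=(scaled_bins[0], scaled_bins[-1]))
gains = uniform_to_pe(np.random.uniform(size=1000))     # 1000 sampled SPE gains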