Example #1
 def make_single_pulse_event(self, **kwargs):
     event = datastructure.Event(
         n_channels=10,
         start_time=0,
         length=100,
         sample_duration=self.pax.config['DEFAULT']['sample_duration']
     )
     event.pulses.append(datastructure.Pulse(**kwargs))
     return event
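A minimal usage sketch (hypothetical test code, not from the project); the Pulse keyword arguments left, raw_data and channel are taken from Example #3 below.

    # Hypothetical call from inside a test case of the same class
    import numpy as np

    event = self.make_single_pulse_event(left=0,
                                          raw_data=np.zeros(100, dtype=np.int16),
                                          channel=1)
    assert len(event.pulses) == 1  # exactly one pulse was attached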
Example #2
 def get_events(self):
     for event_i in range(self.number_of_events):
         self.t.GetEntry(event_i)
         root_event = self.t.events
         event = datastructure.Event(n_channels=root_event.n_channels,
                                     start_time=root_event.start_time,
                                     sample_duration=root_event.sample_duration,
                                     stop_time=root_event.stop_time)
         self.set_python_object_attrs(root_event, event,
                                      self.config['fields_to_ignore'])
         yield event
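The set_python_object_attrs helper is not shown above; a minimal sketch of what it might do, assuming it copies matching attributes from the ROOT event onto the pax event while skipping the configured fields:

    def set_python_object_attrs(self, root_event, event, fields_to_ignore):
        # Sketch only: copy every attribute the pax event also defines,
        # except the fields listed in fields_to_ignore
        # (e.g. those already handled by the Event constructor).
        for name in dir(root_event):
            if name.startswith('_') or name in fields_to_ignore:
                continue
            if hasattr(event, name):
                setattr(event, name, getattr(root_event, name))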
Example #3
    def test_hitfinder(self):
        # Integration test for the hitfinder
        self.pax = core.Processor(
            config_names='XENON100',
            just_testing=True,
            config_dict={
                'pax': {
                    'plugin_group_names': ['test'],
                    'encoder_plugin': None,
                    'decoder_plugin': None,
                    'test': ['PulseProperties.PulseProperties',
                             'HitFinder.FindHits']
                },
                'HitFinder.FindHits': {
                    'left_extension': 0,
                    'right_extension': 0
                }
            })
        for test_w, hit_bounds, pulse_min, pulse_max in (
                # Keep in mind the hitfinder flips the pulse...
            [np.zeros(100), [], 0, 0],
            [np.ones(100), [], 0, 0],
            [-3 * np.ones(100), [], 0, 0],
            [self.peak_at(70, amplitude=-100, width=4), [[70, 73]], 0, 100],
            [
                self.peak_at(70, amplitude=-100, width=4) +
                self.peak_at(80, amplitude=10, width=4), [[70, 73]], -10, 100
            ],
            [
                self.peak_at(70, amplitude=-100, width=4) +
                self.peak_at(80, amplitude=-100, width=4), [[70, 73],
                                                            [80, 83]], 0, 100
            ],
        ):
            e = datastructure.Event(
                n_channels=self.pax.config['DEFAULT']['n_channels'],
                start_time=0,
                sample_duration=self.pax.config['DEFAULT']['sample_duration'],
                stop_time=int(1e6),
                pulses=[
                    dict(left=0,
                         raw_data=np.array(test_w).astype(np.int16),
                         channel=1)
                ])
            e = self.pax.process_event(e)
            self.assertEqual(hit_bounds, [[hit['left'], hit['right']]
                                          for hit in e.all_hits])
            self.assertEqual(pulse_min, e.pulses[0].minimum)
            self.assertEqual(pulse_max, e.pulses[0].maximum)

        delattr(self, 'pax')
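The peak_at helper used above is not shown. A sketch consistent with the asserted hit bounds (a block of width samples starting at the given index, so peak_at(70, amplitude=-100, width=4) fills samples 70-73); this is an assumption, not the project's actual implementation:

    def peak_at(self, index, amplitude, width):
        # Sketch only: 100-sample test waveform with a rectangular excursion
        w = np.zeros(100)
        w[index:index + width] = amplitude
        return w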
Example #4
 def convert_record(self, class_to_load_to, record):
     # We defined a nice custom init for event... ahem... now we have to do
     # cumbersome stuff...
     if class_to_load_to == datastructure.Event:
         result = datastructure.Event(
             n_channels=self.config['n_channels'],
             start_time=record['start_time'],
             stop_time=record['stop_time'],
             sample_duration=record['sample_duration'])
     else:
         result = class_to_load_to()
     for k, v in self._numpy_record_to_dict(record).items():
         # If result doesn't have this attribute, ignore it
         # This happens for n_peaks etc. and attributes that have been removed
         if hasattr(result, k):
             setattr(result, k, v)
     return result
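The _numpy_record_to_dict helper is not part of this example; a minimal sketch, assuming record is a row of a numpy structured array:

    def _numpy_record_to_dict(self, record):
        # Sketch only: map each named field of the structured record to a plain
        # dict, so the setattr() loop above can copy it onto the result object.
        return {name: record[name] for name in record.dtype.names}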
Example #5
 def test_concatenation(self):
     for pulse_bounds, concatenated_pulse_bounds in (
         ([[0, 1], [4, 5]], [[0, 1], [4, 5]]),
         ([[0, 1], [2, 5]], [[0, 5]]),
         ([[0, 0], [1, 5]], [[0, 5]]),
         ([[0, 0], [1, 2], [3, 3], [4, 5]], [[0, 5]]),
         ([[0, 0], [1, 2], [3, 5]], [[0, 5]]),
         ([[0, 0], [1, 1], [3, 5]], [[0, 1], [3, 5]]),
         ([], []),
     ):
         e = datastructure.Event(
             n_channels=self.plugin.config['n_channels'],
             start_time=0,
             sample_duration=self.plugin.config['sample_duration'],
             stop_time=int(1e6),
             pulses=[
                 dict(left=l, right=r, channel=1) for l, r in pulse_bounds
             ])
         e = self.plugin.transform_event(e)
         found_pulse_bounds = [[p.left, p.right] for p in e.pulses]
         self.assertEqual(concatenated_pulse_bounds, found_pulse_bounds)
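The transform_event plugin itself is not shown; the test cases imply a merging rule where pulses separated by no gap (next left <= previous right + 1) are concatenated. A sketch of that rule (an assumption, not the plugin's code):

    def concatenate_pulse_bounds(pulse_bounds):
        # Sketch only: merge adjacent or overlapping [left, right] pulse bounds
        merged = []
        for left, right in sorted(pulse_bounds):
            if merged and left <= merged[-1][1] + 1:
                merged[-1][1] = max(merged[-1][1], right)
            else:
                merged.append([left, right])
        return merged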
Example #6
 def get_all_events_in_current_file(self):
     for line in self.current_file:
         yield datastructure.Event(**json.loads(line))
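This reader expects one JSON object per line, with keys matching the Event constructor arguments. A hedged sketch of writing such a file (field names taken from the other examples; the project's actual serialization may include more fields):

    import json

    with open('events.jsonl', 'w') as f:
        for ev in event_dicts:  # hypothetical iterable of per-event dicts
            f.write(json.dumps({'n_channels': ev['n_channels'],
                                'start_time': ev['start_time'],
                                'stop_time': ev['stop_time'],
                                'sample_duration': ev['sample_duration']}) + '\n')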
Example #7
    def make_pax_event(self):
        """Simulate PMT response to the queued photon signals
        Returns a pax Event containing the simulated PMT waveforms; if no photons have been queued, a noise-only event is returned.
        # TODO: Account for random initial digitizer state wrt interaction? Where?
        """
        log.debug("Now performing hitpattern to waveform conversion")
        start_time = int(time.time() * units.s)

        # Find out the duration of the event
        all_times = np.concatenate(
            list(self.arrival_times_per_channel.values()))
        if not len(all_times):
            log.warning("No photons to simulate: making a noise-only event")
            max_time = 0
        else:
            max_time = all_times.max()

        event = datastructure.Event(
            n_channels=self.config['n_channels'],
            start_time=start_time,
            stop_time=start_time +
            int(max_time + 2 * self.config['event_padding']),
            sample_duration=self.config['sample_duration'])
        # Ensure the event length is even (else it cannot be written to XED)
        if event.length() % 2 != 0:
            event.stop_time += self.config['sample_duration']

        # Convenience variables
        dt = self.config['sample_duration']
        dv = self.config['digitizer_voltage_range'] / 2**(
            self.config['digitizer_bits'])

        start_index = 0
        end_index = event.length() - 1
        pulse_length = end_index - start_index + 1

        # Setup things for real noise simulation
        if self.config['real_noise_sample_size']:
            noise_sample_len = self.config['real_noise_sample_size']
            available_noise_samples = self.noise_data.shape[
                1] // noise_sample_len
            needed_noise_samples = int(
                math.ceil(pulse_length / noise_sample_len))

            noise_sample_mode = self.config.get('real_noise_sample_mode',
                                                'incoherent')
            if noise_sample_mode == 'coherent':
                # Choose a single set of noise sample numbers for the event
                chosen_noise_sample_numbers = np.random.randint(
                    0, available_noise_samples - 1, needed_noise_samples)

                roll_number = np.random.randint(noise_sample_len)

        # Setup the lone-hit arrival_times_per_channel
        lone_hit_arrival_times_per_channels = self.lone_hits(max_time)

        # Build waveform channel by channel
        for channel, photon_detection_times in self.arrival_times_per_channel.items():
            # If the channel is dead, fake, or not in the TPC, we don't do anything.
            if (self.config['gains'][channel] == 0
                    or (self.config['pmt_0_is_fake'] and channel == 0) or
                    channel not in self.config['channels_in_detector']['tpc']):
                continue

            photon_detection_times = np.array(photon_detection_times)

            # Add double photoelectron emission
            if len(photon_detection_times):
                n_dpe = np.random.binomial(
                    len(photon_detection_times),
                    p=self.config['p_double_pe_emision'])
                if n_dpe:
                    dpe_times = np.random.choice(photon_detection_times,
                                                 n_dpe,
                                                 replace=False)
                    photon_detection_times = np.concatenate(
                        [photon_detection_times, dpe_times])

            log.debug(
                "Simulating %d photons in channel %d (gain=%s, gain_sigma=%s)"
                % (len(photon_detection_times), channel,
                   self.config['gains'][channel],
                   self.config['gain_sigmas'][channel]))

            # Combine the lone-hits with the normal PMT pulses
            photon_detection_times = np.concatenate(
                (photon_detection_times,
                 lone_hit_arrival_times_per_channels[channel]))

            gains = self.get_gains(channel, len(photon_detection_times))

            # Add PMT afterpulses
            ap_times = []
            ap_amplifications = []
            ap_gains = []

            # Get the afterpulse settings for this channel:
            #  1. If we have a specific afterpulse config for this channel, we use it
            #  2. Else we fall back to a default configuration
            #  3. If this does not exist, we don't make any afterpulses
            all_ap_data = self.config.get('pmt_afterpulse_types', {})
            if ('each_pmt_afterpulse_types' in self.config
                    and channel in self.config['each_pmt_afterpulse_types']):
                all_ap_data = self.config['each_pmt_afterpulse_types'][channel]

            for ap_data in all_ap_data.values():
                if not ap_data:
                    continue
                # print(ap_data)

                # How many photons will make this kind of afterpulse?
                n_afterpulses = np.random.binomial(
                    n=len(photon_detection_times), p=ap_data['p'])
                if not n_afterpulses:
                    continue

                # Find the time and gain of the afterpulses
                dist_kwargs = ap_data['time_parameters']
                dist_kwargs['size'] = n_afterpulses
                ap_times.extend(
                    np.random.choice(photon_detection_times,
                                     size=n_afterpulses,
                                     replace=False) +
                    getattr(np.random, ap_data['time_distribution'])(
                        **dist_kwargs))

                # Afterpulse gains can be different from regular gains: sample an amplification factor
                if 'amp_mean' in ap_data:
                    ap_amplifications.extend(
                        truncated_gauss_rvs(my_mean=ap_data['amp_mean'],
                                            my_std=ap_data['amp_rms'],
                                            left_boundary=0,
                                            right_boundary=float('inf'),
                                            n_rvs=n_afterpulses))
                else:
                    ap_amplifications.extend([1.] * n_afterpulses)

                ap_gains.extend(self.get_gains(channel, n_afterpulses))

            # Combine the afterpulses with the normal PMT pulses
            ap_gains = [x * y for x, y in zip(ap_gains, ap_amplifications)]
            gains = np.concatenate((gains, ap_gains))
            photon_detection_times = np.concatenate(
                (photon_detection_times, ap_times))

            # Add padding, sort (eh.. or were we already sorted? and is sorting necessary at all??)
            pmt_pulse_centers = np.sort(photon_detection_times +
                                        self.config['event_padding'])

            # Build the waveform pulse by pulse (bin by bin was slow, hope this is faster)

            # Compute offset & center index for each pe-pulse
            # 'index' refers to the (hypothetical) event waveform, as usual
            pmt_pulse_centers = np.array(pmt_pulse_centers, dtype=int)
            offsets = pmt_pulse_centers % dt
            center_index = (
                pmt_pulse_centers -
                offsets) / dt  # Absolute index in waveform of pe-pulse center
            center_index = center_index.astype(int)

            # Simulate an event-long waveform in this channel
            # Remember start padding has already been added to times, so just one padding in end_index
            current_wave = np.zeros(pulse_length)

            for i, _ in enumerate(pmt_pulse_centers):
                # Add some current for this photon pulse
                # Compute the integrated pmt pulse at various samples, then
                # do their diffs/dt
                generated_pulse = self.pmt_pulse_current(gain=gains[i],
                                                         offset=offsets[i])

                # +1 due to np.diff in pmt_pulse_current   #????
                left_index = center_index[i] - start_index + 1
                left_index -= int(self.config['samples_before_pulse_center'])
                righter_index = center_index[i] - start_index + 1
                righter_index += int(self.config['samples_after_pulse_center'])

                # Abandon the pulse if it goes beyond the left/right boundaries
                if len(generated_pulse) != righter_index - left_index:
                    raise RuntimeError(
                        "Generated pulse is %s samples long, can't be inserted between %s and %s"
                        % (len(generated_pulse), left_index, righter_index))
                elif left_index < 0:
                    log.debug("Invalid left index %s: can't be negative" %
                              left_index)
                    continue
                elif righter_index >= len(current_wave):
                    log.debug(
                        "Invalid right index %s: can't be longer than length of wave (%s)!"
                        % (righter_index, len(current_wave)))
                    continue

                current_wave[left_index:righter_index] += generated_pulse

            # Did you order some Gaussian current noise with that?
            if self.config['gauss_noise_sigmas']:
                # If the baseline fluctuation is defined per channel,
                # use those values in preference
                noise_sigma_current = self.config['gauss_noise_sigmas'][
                    channel] * self.config['gains'][channel] / dt
                current_wave += np.random.normal(0, noise_sigma_current,
                                                 len(current_wave))
            elif self.config['gauss_noise_sigma']:
                # / dt is for charge -> current conversion, as in pmt_pulse_current
                noise_sigma_current = (self.config['gauss_noise_sigma'] *
                                       self.config['gains'][channel] / dt)
                current_wave += np.random.normal(0, noise_sigma_current,
                                                 len(current_wave))

            # Convert from PMT current to ADC counts
            adc_wave = current_wave
            adc_wave *= self.config[
                'pmt_circuit_load_resistor']  # Now in voltage
            adc_wave *= self.config[
                'external_amplification']  # Now in voltage after amplifier
            adc_wave /= dv  # Now in float ADC counts above baseline
            adc_wave = np.trunc(adc_wave)  # Now in integer ADC counts above baseline
            # Could round instead of trunc... who cares?

            # PMT signals are negative excursions, so flip them.
            adc_wave = -adc_wave

            # Did you want to superpose onto real noise samples?
            if self.config['real_noise_file']:
                if noise_sample_mode != 'coherent':
                    # For each channel, choose different noise sample numbers
                    chosen_noise_sample_numbers = np.random.randint(
                        0, available_noise_samples - 1, needed_noise_samples)

                    roll_number = np.random.randint(noise_sample_len)

                # Extract the chosen noise samples and concatenate them
                # Have to use a listcomp here, unless you know a way to select multiple slices in numpy?
                #  -- yeah making an index list with np.arange would work, but honestly??
                real_noise = np.concatenate([
                    self.noise_data[channel - self.channel_offset]
                    [nsn * noise_sample_len:(nsn + 1) * noise_sample_len]
                    for nsn in chosen_noise_sample_numbers
                ])

                # Roll the noise samples by a fraction of the sample size,
                # to avoid same artifacts falling at the same point every time
                real_noise = np.roll(real_noise, roll_number, axis=0)

                # Adjust the noise amplitude if needed, then add it to the ADC wave
                noise_amplitude = self.config.get('adjust_noise_amplitude',
                                                  {}).get(str(channel), 1)
                if noise_amplitude != 1:
                    # Determine a rough baseline for the noise, then adjust towards it
                    baseline = np.mean(real_noise[:min(len(real_noise), 50)])
                    real_noise = baseline + noise_amplitude * (real_noise -
                                                               baseline)
                adc_wave += real_noise[:pulse_length]

            else:
                # If you don't want to superpose onto real noise,
                # we should add a reference baseline
                adc_wave += self.config['digitizer_reference_baseline']

            # Digitizers have finite number of bits per channel, so clip the signal.
            adc_wave = np.clip(adc_wave, 0, 2**(self.config['digitizer_bits']))

            event.pulses.append(
                datastructure.Pulse(channel=channel,
                                    left=start_index,
                                    raw_data=adc_wave.astype(np.int16)))

        log.debug("Simulated pax event of %s samples length and %s pulses "
                  "created." % (event.length(), len(event.pulses)))
        self.clear_signals_queue()
        return event
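The pmt_pulse_current method called above is not shown. A sketch of the diff-of-integral idea the comments describe (integrate an assumed single-photoelectron current shape at the sample boundaries, then take np.diff / dt); the exponential shape, tau and the samples_before/samples_after defaults are assumptions:

    import numpy as np

    def pmt_pulse_current_sketch(gain, offset, dt=10,
                                 samples_before=1, samples_after=10, tau=20.0):
        # Sample-boundary times relative to the photoelectron arrival (ns)
        t = np.arange(-samples_before, samples_after + 1) * dt - offset
        # Integrated charge up to each boundary (0 before arrival,
        # saturating exponential afterwards, normalized to gain)
        integrated = gain * np.where(t < 0, 0.0, 1.0 - np.exp(-t / tau))
        # Current per sample: charge difference divided by the sample duration.
        # np.diff drops one entry, hence the "+1" bookkeeping in the caller.
        return np.diff(integrated) / dt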
Example #8
 def decode_event(self, event_msgpack):
     event_dict = msgpack.unpackb(event_msgpack)
     # MessagePack returns byte keys, which we can't pass as keyword arguments
     # Unfortunately we have to duplicate this code in data_model too
     event_dict = {k.decode('ascii'): v for k, v in event_dict.items()}
     return datastructure.Event(**event_dict)
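A round-trip sketch with assumed field values; the k.decode('ascii') step above exists because older msgpack-python versions return byte keys from unpackb by default, and decoder here stands for a hypothetical instance of the class this method belongs to:

    import msgpack

    event_dict = {'n_channels': 10,
                  'start_time': 0,
                  'stop_time': int(1e6),
                  'sample_duration': 10}
    event_msgpack = msgpack.packb(event_dict)    # serialize a plain dict
    event = decoder.decode_event(event_msgpack)  # reconstruct the pax Event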