Example #1
    def compute(self, raw_records_coin_nv):
        # Do not trust the DAQ + strax.baseline to leave the
        # out-of-bounds samples at zero.
        r = strax.raw_to_records(raw_records_coin_nv)
        del raw_records_coin_nv

        r = strax.sort_by_time(r)
        strax.zero_out_of_bounds(r)
        strax.baseline(r,
                       baseline_samples=self.config['baseline_samples_nv'],
                       flip=True)
        strax.integrate(r)

        strax.zero_out_of_bounds(r)

        hits = strax.find_hits(
            r, min_amplitude=self.config['hit_min_amplitude_nv'])

        le, re = self.config['save_outside_hits_nv']
        r = strax.cut_outside_hits(r,
                                   hits,
                                   left_extension=le,
                                   right_extension=re)
        strax.zero_out_of_bounds(r)

        rlinks = strax.record_links(r)
        r = clean_up_empty_records(r, rlinks, only_last=True)
        return r
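The pipeline above recurs throughout these examples: convert raw records, sort by time, zero the out-of-bounds samples, baseline (flip=True turns the negative-going PMT pulses positive), integrate, and find hits. A minimal self-contained sketch on one synthetic record, assuming a recent strax where raw_record_dtype and these call signatures exist as shown (the pulse shape and threshold are made up for illustration):

import numpy as np
import strax

# One synthetic raw record: flat at 16000 ADC with a small dip
# (PMT pulses go downward until strax.baseline flips them).
rr = np.zeros(1, dtype=strax.raw_record_dtype(110))
rr['dt'] = 10                   # ns per sample
rr['length'] = 110
rr['pulse_length'] = 110
rr['data'][:] = 16000
rr['data'][0, 50:55] = 15000    # 1000 ADC below baseline

r = strax.raw_to_records(rr)
r = strax.sort_by_time(r)
strax.zero_out_of_bounds(r)
strax.baseline(r, baseline_samples=40, flip=True)
strax.integrate(r)              # fills r['area']
hits = strax.find_hits(r, min_amplitude=500)
print(len(hits), r[0]['baseline'], r[0]['area'])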
Example #2
    def compute(self, raw_records):

        # Convert everything to the records data type -- adds extra fields.
        records = strax.raw_to_records(raw_records)
        del raw_records

        # calculate baseline and baseline rms
        strax.baseline(records,
                       baseline_samples=self.config['baseline_samples'],
                       flip=True)

        # find all hits
        hits = strax.find_hits(
            records,
            min_amplitude=self.hit_thresholds,
            min_height_over_noise=self.config['hit_min_height_over_noise'],
        )

        # sort hits by record_i and time, then find LED hit and afterpulse
        # hits within the same record
        hits_ap = find_ap(
            hits,
            records,
            LED_window_left=self.config['LED_window_left'],
            LED_window_right=self.config['LED_window_right'],
            hit_left_extension=self.hit_left_extension,
            hit_right_extension=self.hit_right_extension,
        )

        to_pe = self.to_pe[hits_ap['channel']]
        hits_ap['area_pe'] = hits_ap['area'] * to_pe
        hits_ap['height_pe'] = hits_ap['height'] * to_pe

        return hits_ap
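The last two assignments convert areas and heights from ADC counts to photoelectrons using a per-channel gain array; numpy fancy indexing (self.to_pe[hits_ap['channel']]) broadcasts the right gain to every hit. A toy illustration with made-up gains:

import numpy as np

to_pe = np.array([0.005, 0.007, 0.006])   # made-up gains for channels 0..2
hits = np.zeros(2, dtype=[('channel', np.int16),
                          ('area', np.float32),
                          ('area_pe', np.float32)])
hits['channel'] = [2, 0]
hits['area'] = [1000., 2000.]
hits['area_pe'] = hits['area'] * to_pe[hits['channel']]
print(hits['area_pe'])                    # -> [ 6. 10.]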
Example #3
    def compute(self, raw_records_coin_nv):
        # Do not trust the DAQ + strax.baseline to leave the
        # out-of-bounds samples at zero.
        r = strax.raw_to_records(raw_records_coin_nv)
        del raw_records_coin_nv

        r = strax.sort_by_time(r)
        strax.zero_out_of_bounds(r)
        strax.baseline(r,
                       baseline_samples=self.baseline_samples,
                       flip=True)

        if self.config['min_samples_alt_baseline_nv']:
            m = r['pulse_length'] > self.config['min_samples_alt_baseline_nv']
            if np.any(m):
                # Correcting baseline after PMT saturated signals
                r[m] = median_baseline(r[m])

        strax.integrate(r)

        strax.zero_out_of_bounds(r)

        hits = strax.find_hits(r, min_amplitude=self.hit_thresholds)

        le, re = self.config['save_outside_hits_nv']
        r = strax.cut_outside_hits(r, hits, left_extension=le, right_extension=re)
        strax.zero_out_of_bounds(r)

        return r
Example #4
    def compute(self, chunk_i):
        pre, current, post = self._chunk_paths(chunk_i)
        if chunk_i == 0 and pre:
            print(f"There should be no {pre} dir for chunk 0: ignored")
            pre = False
        records = np.concatenate(
            ([self._load_chunk(pre, kind='pre')] if pre else []) +
            [self._load_chunk(current)] +
            ([self._load_chunk(post, kind='post')] if post else []))

        strax.baseline(records)
        strax.integrate(records)

        if len(records):
            # Convert time to time in ns since unix epoch.
            # Ensure the offset is a whole digitizer sample
            t0 = int(self.config["run_start_time"] * int(1e9))
            dt = records[0]['dt']
            t0 = dt * (t0 // dt)
            records["time"] += t0

            timespan_sec = (records[-1]['time'] - records[0]['time']) / 1e9
            print(f'{chunk_i}: read {records.nbytes/1e6:.2f} MB '
                  f'({len(records)} records, '
                  f'{timespan_sec:.2f} sec) from readers')
        else:
            print(f'{chunk_i}: read an empty chunk!')

        return records
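The t0 = dt * (t0 // dt) line floors the epoch offset to a whole digitizer sample, so all record times stay on the sampling grid. Worked through with a made-up timestamp:

dt = 10                           # ns per digitizer sample
t0 = 1_578_912_345_678_901_234    # made-up run start, ns since the epoch
t0_aligned = dt * (t0 // dt)      # floor to a multiple of dt
print(t0 - t0_aligned)            # -> 4, the sub-sample remainder dropped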
Example #5
def raw_records_matrix(context, run_id, raw_records, time_range):
    # Convert raw to records. We may not be able to baseline correctly
    # at the start of the range due to missing zeroth fragments
    records = strax.raw_to_records(raw_records)
    strax.baseline(records, allow_sloppy_chunking=True)

    return context.records_matrix(run_id=run_id,
                                  records=records,
                                  time_range=time_range)
Example #6
 def finish_results():
     nonlocal results
     records = np.concatenate(results)
     # In strax data, records are always stored
     # sorted, baselined and integrated
     records = strax.sort_by_time(records)
     strax.baseline(records)
     strax.integrate(records)
     results = []
     return records
Example #7
 def compute(self, raw_records_aqmon):
     rec = strax.raw_to_records(raw_records_aqmon)
     rec = strax.sort_by_time(rec)  # sort_by_time returns a new array
     strax.zero_out_of_bounds(rec)
     strax.baseline(rec,
                    baseline_samples=self.config['baseline_samples_aqmon'],
                    flip=True)
     aqmon_hits = strax.find_hits(
         rec, min_amplitude=self.config['hit_min_amplitude_aqmon'])
     return aqmon_hits
Example #8
    def compute(self, raw_records_nv, start, end):
        strax.zero_out_of_bounds(raw_records_nv)

        # First we have to split the raw records into coincident records
        # and lone records. Note that everything which does not satisfy
        # the coincidence requirement is considered a lone record.
        intervals = coincidence(raw_records_nv,
                                self.config['coincidence_level_recorder_nv'],
                                self.config['resolving_time_recorder_nv'])
        # Always save the first and last resolving_time nanoseconds
        # (e.g. 600 ns) since we cannot guarantee the gap size to be
        # larger. (We cannot use an OverlapWindowPlugin either, since it
        # requires disjoint objects.)
        if len(intervals):
            intervals_with_bounds = np.zeros((len(intervals) + 2, 2),
                                             dtype=np.int64)
            intervals_with_bounds[1:-1, :] = intervals
            intervals_with_bounds[0, :] = start, min(
                start + self.config['resolving_time_recorder_nv'],
                intervals[0, 0])
            intervals_with_bounds[-1, :] = max(
                end - self.config['resolving_time_recorder_nv'],
                intervals[-1, 1]), end
            del intervals
        else:
            intervals_with_bounds = np.zeros((0, 2), dtype=np.int64)

        neighbors = strax.record_links(raw_records_nv)
        mask = pulse_in_interval(raw_records_nv, neighbors,
                                 *np.transpose(intervals_with_bounds))
        rr, lone_records = straxen.mask_and_not(raw_records_nv, mask)

        # Compute some properties of the lone_records:
        # We compute baseline etc. only for the lone records, since their
        # raw data will be deleted. The coincident raw records are kept
        # raw, so the settings can still be changed and the data reprocessed.
        lr = strax.raw_to_records(lone_records)
        del lone_records

        lr = strax.sort_by_time(lr)
        strax.zero_out_of_bounds(lr)
        strax.baseline(
            lr,
            baseline_samples=self.config['nbaseline_samples_lone_records_nv'],
            flip=True)
        strax.integrate(lr)
        lrs, lr = compute_lone_records(lr, self.config['channel_map']['nveto'],
                                       self.config['n_lone_records_nv'])
        lrs['time'] = start
        lrs['endtime'] = end

        return {
            'raw_records_coin_nv': rr,
            'lone_raw_records_nv': lr,
            'lone_raw_record_statistics_nv': lrs
        }
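The edge handling above pads the coincidence intervals with guard intervals at the chunk boundaries. A toy run of the same construction with made-up numbers (resolving_time = 600 ns):

import numpy as np

start, end, resolving_time = 0, 10_000, 600
intervals = np.array([[1_000, 1_600], [7_200, 7_800]], dtype=np.int64)

out = np.zeros((len(intervals) + 2, 2), dtype=np.int64)
out[1:-1] = intervals
out[0] = start, min(start + resolving_time, intervals[0, 0])
out[-1] = max(end - resolving_time, intervals[-1, 1]), end
print(out)  # rows: [0 600] [1000 1600] [7200 7800] [9400 10000]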
Example #9
    def final_results(self, record_j):
        # Take the filled part of the buffer; sort_by_time returns a copy
        records = self.record_buffer[:record_j]
        records = strax.sort_by_time(records)  # Must keep this for sorted output
        strax.baseline(records)
        strax.integrate(records)

        _truth = self.truth_buffer[self.truth_buffer['fill']]
        # Return truth without 'fill' field
        truth = np.zeros(len(_truth),
                         dtype=instruction_dtype + truth_extra_dtype)
        for name in truth.dtype.names:
            truth[name] = _truth[name]

        return dict(raw_records=records, truth=truth)
Example #10
    def compute(self, chunk_i):
        fp = self._chunk_path(chunk_i)
        records = self._load_chunk(fp)

        strax.baseline(records)
        strax.integrate(records)

        if len(records):
            timespan_sec = (records[-1]['time'] - records[0]['time']) / 1e9
            print(f'{chunk_i}: read {records.nbytes/1e6:.2f} MB '
                  f'({len(records)} records, '
                  f'{timespan_sec:.2f} live seconds)')
        else:
            print(f'{chunk_i}: read an empty chunk!')

        return records
Example #11
    def compute(self, chunk_i):
        pre, current, post = self._chunk_paths(chunk_i)
        records = np.concatenate(
            ([self._load_chunk(pre, kind='pre')] if pre else []) +
            [self._load_chunk(current)] +
            ([self._load_chunk(post, kind='post')] if post else []))

        strax.baseline(records)
        strax.integrate(records)

        timespan_sec = (records[-1]['time'] - records[0]['time']) / 1e9
        print(f'{chunk_i}: read {records.nbytes/1e6:.2f} MB '
              f'({len(records)} records, '
              f'{timespan_sec:.2f} sec) from readers')

        return records
Example #12
def raw_records_matrix(context, run_id, raw_records, time_range,
                       ignore_max_sample_warning=False,
                       max_samples=DEFAULT_MAX_SAMPLES,
                       **kwargs):
    # Convert raw to records. We may not be able to baseline correctly
    # at the start of the range due to missing zeroth fragments
    records = strax.raw_to_records(raw_records)
    strax.baseline(records, allow_sloppy_chunking=True)
    strax.zero_out_of_bounds(records)

    return context.records_matrix(run_id=run_id,
                                  records=records,
                                  time_range=time_range,
                                  max_samples=max_samples,
                                  ignore_max_sample_warning=ignore_max_sample_warning,
                                  **kwargs)
Example #13
    def compute(self, raw_records_aqmon):
        not_allowed_channels = (set(np.unique(raw_records_aqmon['channel'])) -
                                set(self.aqmon_channels))
        if not_allowed_channels:
            raise ValueError(
                f'Unknown channel(s) {not_allowed_channels}. Known channels are {self.aqmon_channels}'
            )

        if self.check_raw_record_aqmon_overlaps:
            straxen.check_overlaps(raw_records_aqmon,
                                   n_channels=max(AqmonChannels).value + 1)

        records = strax.raw_to_records(raw_records_aqmon)
        strax.zero_out_of_bounds(records)
        strax.baseline(records,
                       baseline_samples=self.baseline_samples_aqmon,
                       flip=True)
        aqmon_hits = self.find_aqmon_hits_per_channel(records)
        aqmon_hits = strax.sort_by_time(aqmon_hits)
        return aqmon_hits
Example #14
    def compute(self, chunk_i):
        pre, current, post = self._chunk_paths(chunk_i)
        if chunk_i == 0 and pre:
            print(f"There should be no {pre} dir for chunk 0: ignored")
            pre = False
        records = np.concatenate(
            ([self._load_chunk(pre, kind='pre')] if pre else []) +
            [self._load_chunk(current)] +
            ([self._load_chunk(post, kind='post')] if post else []))

        strax.baseline(records)
        strax.integrate(records)

        if len(records):
            timespan_sec = (records[-1]['time'] - records[0]['time']) / 1e9
            print(f'{chunk_i}: read {records.nbytes/1e6:.2f} MB '
                  f'({len(records)} records, '
                  f'{timespan_sec:.2f} sec) from readers')
        else:
            print(f'{chunk_i}: read an empty chunk!')

        return records
Example #15
    def compute(self, raw_records_coin_nv):
        # Do not trust the DAQ + strax.baseline to leave the
        # out-of-bounds samples at zero.
        r = strax.raw_to_records(raw_records_coin_nv)
        del raw_records_coin_nv

        r = strax.sort_by_time(r)
        strax.zero_out_of_bounds(r)
        strax.baseline(r, baseline_samples=self.baseline_samples, flip=True)

        strax.integrate(r)

        strax.zero_out_of_bounds(r)

        hits = strax.find_hits(r, min_amplitude=self.hit_thresholds)

        le, re = self.config['save_outside_hits_nv']
        r = strax.cut_outside_hits(r,
                                   hits,
                                   left_extension=le,
                                   right_extension=re)
        strax.zero_out_of_bounds(r)

        return r
Example #16
    def compute(self, raw_records, start, end):
        if self.config['check_raw_record_overlaps']:
            check_overlaps(raw_records, n_channels=3000)

        # Throw away any non-TPC records; this should only happen for XENON1T
        # converted data
        raw_records = raw_records[
            raw_records['channel'] < self.config['n_tpc_pmts']]

        # Convert everything to the records data type -- adds extra fields.
        r = strax.raw_to_records(raw_records)
        del raw_records

        # Do not trust the DAQ + strax.baseline to leave the
        # out-of-bounds samples at zero.
        # TODO: better to throw an error if something is nonzero
        strax.zero_out_of_bounds(r)

        strax.baseline(
            r,
            baseline_samples=self.config['baseline_samples'],
            allow_sloppy_chunking=self.config['allow_sloppy_chunking'],
            flip=True)

        strax.integrate(r)

        pulse_counts = count_pulses(r, self.config['n_tpc_pmts'])
        pulse_counts['time'] = start
        pulse_counts['endtime'] = end

        if len(r) and self.hev_enabled:

            r, r_vetoed, veto_regions = software_he_veto(
                r,
                self.to_pe,
                end,
                area_threshold=self.config['tail_veto_threshold'],
                veto_length=self.config['tail_veto_duration'],
                veto_res=self.config['tail_veto_resolution'],
                pass_veto_extend=self.config['tail_veto_pass_extend'],
                pass_veto_fraction=self.config['tail_veto_pass_fraction'],
                max_veto_value=self.config['max_veto_value'])

            # In the future, we'll probably want to sum the waveforms
            # inside the vetoed regions, so we can still save the "peaks".
            del r_vetoed

        else:
            veto_regions = np.zeros(0, dtype=strax.hit_dtype)

        if len(r):
            # Find hits
            # -- before filtering, since this messes with the S/N
            hits = strax.find_hits(r,
                                   min_amplitude=straxen.hit_min_amplitude(
                                       self.config['hit_min_amplitude']))

            if self.config['pmt_pulse_filter']:
                # Filter to concentrate the PMT pulses
                strax.filter_records(r,
                                     np.array(self.config['pmt_pulse_filter']))

            le, re = self.config['save_outside_hits']
            r = strax.cut_outside_hits(r,
                                       hits,
                                       left_extension=le,
                                       right_extension=re)

            # Probably overkill, but just to be sure...
            strax.zero_out_of_bounds(r)

        return dict(records=r,
                    pulse_counts=pulse_counts,
                    veto_regions=veto_regions)
Example #17
    def compute(self, raw_records_nv, start, end):
        if self.config['check_raw_record_overlaps_nv']:
            straxen.check_overlaps(raw_records_nv, n_channels=3000)
        # Cover the case where we do not want any coincidence:
        if self.config['coincidence_level_recorder_nv'] <= 1:
            rr = raw_records_nv
            lr = np.zeros(0, dtype=self.dtype['lone_raw_records_nv'])
            lrs = np.zeros(0,
                           dtype=self.dtype['lone_raw_record_statistics_nv'])
            return {
                'raw_records_coin_nv': rr,
                'lone_raw_records_nv': lr,
                'lone_raw_record_statistics_nv': lrs
            }

        # Search for hits to define coincidence intervals:
        temp_records = strax.raw_to_records(raw_records_nv)
        temp_records = strax.sort_by_time(temp_records)
        strax.zero_out_of_bounds(temp_records)
        strax.baseline(temp_records,
                       baseline_samples=self.baseline_samples,
                       flip=True)
        hits = strax.find_hits(temp_records, min_amplitude=self.hit_thresholds)
        del temp_records

        # First we have to split the raw records into coincident records
        # and lone records. Note that everything which does not satisfy
        # the coincidence requirement is considered a lone record.
        intervals = find_coincidence(
            hits, self.config['coincidence_level_recorder_nv'],
            self.config['resolving_time_recorder_nv'],
            self.config['pre_trigger_time_nv'])
        del hits

        # Always save the first and last resolving_time nanoseconds
        # (e.g. 600 ns) since we cannot guarantee the gap size to be
        # larger. (We cannot use an OverlapWindowPlugin either, since it
        # requires disjoint objects.)
        if len(intervals):
            intervals_with_bounds = np.zeros(len(intervals) + 2,
                                             dtype=strax.time_fields)
            intervals_with_bounds['time'][1:-1] = intervals['time']
            intervals_with_bounds['endtime'][1:-1] = intervals['endtime']
            intervals_with_bounds['time'][0] = start
            intervals_with_bounds['endtime'][0] = min(
                start + self.config['resolving_time_recorder_nv'],
                intervals['time'][0])
            intervals_with_bounds['time'][-1] = max(
                end - self.config['resolving_time_recorder_nv'],
                intervals['endtime'][-1])
            intervals_with_bounds['endtime'][-1] = end
            del intervals
        else:
            intervals_with_bounds = np.zeros(0, dtype=strax.time_fields)

        neighbors = strax.record_links(raw_records_nv)
        mask = pulse_in_interval(
            raw_records_nv,
            neighbors,
            intervals_with_bounds['time'],
            intervals_with_bounds['endtime'],
        )

        rr, lone_records = straxen.mask_and_not(raw_records_nv, mask)

        # Compute some properties of the lone_records:
        # We compute baseline etc. only for the lone records, since their
        # raw data will be deleted. The coincident raw records are kept
        # raw, so the settings can still be changed and the data reprocessed.
        lr = strax.raw_to_records(lone_records)
        del lone_records

        lr = strax.sort_by_time(lr)
        strax.zero_out_of_bounds(lr)
        strax.baseline(lr, baseline_samples=self.baseline_samples, flip=True)
        strax.integrate(lr)
        lrs, lr = compute_lone_records(lr, self.config['channel_map']['nveto'],
                                       self.config['n_lone_records_nv'])
        lrs['time'] = start
        lrs['endtime'] = end

        return {
            'raw_records_coin_nv': rr,
            'lone_raw_records_nv': lr,
            'lone_raw_record_statistics_nv': lrs
        }
Example #18
def pax_to_records(input_filename,
                   samples_per_record=strax.DEFAULT_RECORD_LENGTH):
    """Return pulse records array from pax zip input_filename"""
    from pax import core  # Pax is not a dependency
    mypax = core.Processor(
        'XENON1T',
        config_dict=dict(
            pax=dict(look_for_config_in_runs_db=False,
                     plugin_group_names=['input'],
                     encoder_plugin=None,
                     input_name=input_filename),
            # Fast startup: skip loading big maps
            WaveformSimulator=dict(s1_light_yield_map='placeholder_map.json',
                                   s2_light_yield_map='placeholder_map.json',
                                   s1_patterns_file=None,
                                   s2_patterns_file=None)))

    def get_events():
        for e in mypax.get_events():
            yield mypax.process_event(e)

    # We loop over the events twice for convenience
    # Yeah, this is probably not optimal
    pulse_lengths = np.array(
        [p.length for e in get_events() for p in e.pulses])

    n_records = strax.records_needed(pulse_lengths, samples_per_record).sum()
    records = np.zeros(n_records, dtype=strax.record_dtype(samples_per_record))

    output_record_index = 0  # Record offset in data
    for event in get_events():
        for p in event.pulses:

            n_records = strax.records_needed(p.length, samples_per_record)
            for rec_i in range(n_records):
                r = records[output_record_index]
                r['time'] = (event.start_time + p.left * 10 +
                             rec_i * samples_per_record * 10)
                r['channel'] = p.channel
                r['pulse_length'] = p.length
                r['record_i'] = rec_i
                r['dt'] = 10

                # How much are we storing in this record?
                if rec_i != n_records - 1:
                    # More records of this pulse follow, so store a full record
                    n_store = samples_per_record
                    assert p.length > samples_per_record * (rec_i + 1)
                else:
                    # Just enough to store the rest of the data
                    # Note it's not p.length % samples_per_record!!!
                    # (that would be zero if we have to store a full record)
                    n_store = p.length - samples_per_record * rec_i

                assert 0 <= n_store <= samples_per_record
                r['length'] = n_store

                offset = rec_i * samples_per_record
                r['data'][:n_store] = p.raw_data[offset:offset + n_store]
                output_record_index += 1

    mypax.shutdown()

    # In strax data, records are always stored
    # sorted, baselined and integrated
    records = strax.sort_by_time(records)
    strax.baseline(records)
    strax.integrate(records)
    return records
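The splitting above relies on strax.records_needed being a ceiling division: a pulse of L samples occupies ceil(L / samples_per_record) records, with only the last record partially filled. A quick check with made-up lengths:

import numpy as np
import strax

pulse_lengths = np.array([1, 110, 111, 220])
print(strax.records_needed(pulse_lengths, 110))  # -> [1 1 2 2]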
Example #19
 def get_nim_edge(raw_records, threshold=500):
     records = strax.raw_to_records(raw_records)
     strax.baseline(records)
     hits = strax.find_hits(records, min_amplitude=threshold)
     return hits
Example #20
def plot_pulses(context,
                raw_records,
                run_id,
                time_range,
                plot_hits=False,
                plot_median=False,
                max_plots=20,
                store_pdf=False,
                path='',
                detector_ending=''):
    """
    Plots nveto pulses for a list of records.
    :param context: Context to be used.
    :param plot_hits: If True, plot the hit boundaries, including the
        left and right extensions, as orange shaded regions.
    :param plot_median: If True, plot the pulse's sample median as a
        dotted line.
    :param max_plots: Limits the number of figures. If you would like
        to plot more pulses you should put the plots in a PDF.
    :param store_pdf: If True, figures are stored in a PDF instead of
        being plotted in your notebook. The file name is generated
        automatically, including the time range and run_id.
    :param path: Relative path where the PDF should be stored. By default
        it is the directory of the notebook.
    :param detector_ending: Ending of the corresponding detector. Empty
        string for TPC, '_nv' for neutron-veto and '_mv' for muon-veto.
    """
    # Register records plugin to get settings
    p = context.get_single_plugin(run_id, 'records' + detector_ending)

    # Compute strax baseline and baseline_rms:
    records = strax.raw_to_records(raw_records)
    records = strax.sort_by_time(records)
    strax.zero_out_of_bounds(records)

    baseline_key = [
        key for key in p.config.keys() if 'baseline_samples' in key
    ][0]

    if isinstance(p.config[baseline_key], int):
        baseline_samples = p.config[baseline_key]
    else:
        baseline_samples = straxen.get_correction_from_cmt(
            run_id, p.config[baseline_key])

    strax.baseline(records, baseline_samples=baseline_samples, flip=True)

    nfigs = 1
    if store_pdf and time_range is None:
        raise ValueError('Specify time range!')
    if store_pdf:
        from matplotlib.backends.backend_pdf import PdfPages
        fname = f'pulses_{run_id}_{time_range[0]}_{time_range[1]}.pdf'
        fname = os.path.join(path, fname)
        pdf = PdfPages(fname)

    hits = None  # needed for the 'del' at the end if plot_hits is False

    for inds in _yield_pulse_indices(raw_records):
        # Records of one pulse grouped; now plot it:
        rr_pulse = raw_records[inds]
        r_pulse = records[inds]

        fig, axes = straxen.plot_single_pulse(rr_pulse, run_id)
        if detector_ending == '_nv':
            # Digitizer baseline values are only stored for the nv detector:
            axes.axhline(rr_pulse[0]['baseline'],
                         ls='dashed',
                         color='k',
                         label=f'D. Bas.: {rr_pulse[0]["baseline"]} ADC')

        baseline = r_pulse[0]['baseline']
        baseline_rms = r_pulse[0]['baseline_rms']
        axes.axhline(
            baseline,
            ls='solid',
            color='k',
            label=f'Strax Bas. +/-RMS:\n ({baseline:.2f}+/-{baseline_rms:.2f}) ADC')
        xlim = axes.get_xlim()
        axes.fill_between(xlim, [baseline + baseline_rms] * 2,
                          [baseline - baseline_rms] * 2,
                          color='gray',
                          alpha=0.4)

        # Check the type of p.hit_thresholds; thr must be defined below
        if isinstance(p.hit_thresholds, int):
            thr = p.hit_thresholds
        elif isinstance(p.hit_thresholds, np.ndarray):
            thr = p.hit_thresholds[rr_pulse['channel']][0]
        else:
            raise ValueError('p.hit_thresholds must be an int or np.ndarray')

        if plot_median:
            # Plot median if asked.
            # Have to make pulse again:
            pulse = straxen.matplotlib_utils._make_pulse(rr_pulse)
            median = np.median(pulse)
            axes.axhline(median,
                         ls='dotted',
                         color='k',
                         label=f'Median Bas.: {median:.0f} ADC')

            axes.axhline(median - thr, ls='dotted', color='orange')

        if plot_hits:
            min_amplitude = thr

            axes.axhline(baseline - min_amplitude,
                         color='orange',
                         label='Hitfinder threshold')

            hits = strax.find_hits(r_pulse, min_amplitude=min_amplitude)
            if detector_ending != '_he':
                # We don't have 'save_outside_hits_he' at all!
                le, re = p.config['save_outside_hits' + detector_ending]
            else:
                le, re = p.config['save_outside_hits']
            start = (hits['time'] - r_pulse[0]['time']) / r_pulse[0]['dt'] - le
            end = (strax.endtime(hits) -
                   r_pulse[0]['time']) / r_pulse[0]['dt'] + re

            ylim = axes.get_ylim()
            for s, e in zip(start, end):
                plt.fill_between((s, e), *ylim, alpha=0.2, color='orange')
            axes.set_ylim(*ylim)

        plt.legend()
        axes.set_xlim(*xlim)

        if store_pdf:
            pdf.savefig(fig)
            plt.close(fig)

        nfigs += 1
        if max_plots is not None and nfigs > max_plots:
            break

    if store_pdf:
        pdf.close()
    del records, hits
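A hypothetical usage sketch: the context constructor, run id, and timestamp below are placeholders, and it assumes raw_records_nv is available for that range.

import straxen

st = straxen.contexts.xenonnt_online()   # any configured context works
run_id = '012345'                        # placeholder run
t0 = 1_600_000_000_000_000_000           # placeholder ns timestamp in the run
time_range = (t0, t0 + 10_000)
rr = st.get_array(run_id, 'raw_records_nv', time_range=time_range)
plot_pulses(st, rr, run_id, time_range,
            plot_hits=True, store_pdf=True, detector_ending='_nv')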