Code example #1
    def compute(self, peaklets):
        if not len(peaklets):
            return peaklets[:0]

        if self.config['s2_merge_max_gap'] < 0:
            # Do not merge at all
            merged_s2s = np.zeros(0, dtype=peaklets.dtype)
        else:
            # Find all groups of peaklets separated by < the gap
            cluster_starts, cluster_stops = strax.find_peak_groups(
                peaklets, self.config['s2_merge_max_gap'])

            start_merge_at, end_merge_at = self.get_merge_instructions(
                peaklets['time'],
                strax.endtime(peaklets),
                areas=peaklets['area'],
                types=peaklets['type'],
                cluster_starts=cluster_starts,
                cluster_stops=cluster_stops,
                max_duration=self.config['s2_merge_max_duration'],
                max_area=self.config['s2_merge_max_area'])

            merged_s2s = strax.merge_peaks(
                peaklets,
                start_merge_at,
                end_merge_at,
                max_buffer=int(self.config['s2_merge_max_duration'] //
                               peaklets['dt'].min()))
            merged_s2s['type'] = 2
            strax.compute_widths(merged_s2s)

        return merged_s2s
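A note on the max_buffer argument above: it presumably bounds the number of waveform samples a merged peak may need, i.e. max_duration worth of samples at the finest sampling interval among the peaklets. A small numeric sketch of that arithmetic, with made-up values:

# Illustrative values only; the real ones come from self.config and the data
s2_merge_max_duration = 50_000   # ns
finest_dt = 10                   # ns, smallest peaklets['dt']
max_buffer = int(s2_merge_max_duration // finest_dt)  # 5_000 samples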
Code example #2
    def compute(self, records):
        r = records
        hits = strax.find_hits(r, threshold=self.config['hit_threshold'])
        hits = strax.sort_by_time(hits)

        peaks = strax.find_peaks(
            hits,
            self.config['to_pe'],
            result_dtype=self.dtype,
            gap_threshold=self.config['peak_gap_threshold'],
            left_extension=self.config['peak_left_extension'],
            right_extension=self.config['peak_right_extension'],
            min_channels=self.config['peak_min_chan'],
            min_area=self.config['peak_min_area'],
            max_duration=self.config['peak_max_duration'])
        strax.sum_waveform(peaks, r, adc_to_pe=self.config['to_pe'])
        peaks = peaks[peaks['dt'] > 0]  # removes strange edge case
        peaks = strax.split_peaks(peaks,
                                  r,
                                  self.config['to_pe'],
                                  min_height=self.config['split_min_height'],
                                  min_ratio=self.config['split_min_ratio'])

        strax.compute_widths(peaks)

        return peaks
Code example #3
    def __call__(self, peaks, records, to_pe, data_type,
                 next_ri=None, do_iterations=1, min_area=0, **kwargs):
        if not len(records) or not len(peaks) or not do_iterations:
            return peaks

        # Build the *args tuple for self.find_split_points from kwargs
        # since numba doesn't support **kwargs
        args_options = []
        for i, (k, value) in enumerate(self.find_split_args_defaults):
            if k in kwargs:
                value = kwargs[k]
            if k == 'threshold':
                # The 'threshold' option is a user-specified function
                value = value(peaks)
            args_options.append(value)
        args_options = tuple(args_options)

        # Check for spurious options
        argnames = [k for k, _ in self.find_split_args_defaults]
        for k in kwargs:
            if k not in argnames:
                raise TypeError(f"Unknown argument {k} for {self.__class__}")

        is_split = np.zeros(len(peaks), dtype=np.bool_)

        split_function = {'peaks': self._split_peaks,
                          'hitlets': self._split_hitlets}
        if data_type not in split_function:
            raise ValueError(f'Data_type "{data_type}" is not supported.')

        new_peaks = split_function[data_type](
            # Numba doesn't like self as argument, but it's ok with functions...
            split_finder=self.find_split_points,
            peaks=peaks,
            is_split=is_split,
            orig_dt=records[0]['dt'],
            min_area=min_area,
            args_options=tuple(args_options),
            result_dtype=peaks.dtype)

        if is_split.sum() != 0:
            # Found new peaks: compute basic properties
            if data_type == 'peaks':
                strax.sum_waveform(new_peaks, records, to_pe)
            elif data_type == 'hitlets':
                # Add record fields here
                strax.update_new_hitlets(new_peaks, records, next_ri, to_pe)

            strax.compute_widths(new_peaks)

            # ... and recurse (if needed)
            new_peaks = self(new_peaks, records, to_pe, data_type, next_ri,
                             do_iterations=do_iterations - 1,
                             min_area=min_area, **kwargs)
            peaks = strax.sort_by_time(np.concatenate([peaks[~is_split],
                                                       new_peaks]))

        return peaks
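Since numba-compiled functions cannot take **kwargs, the __call__ above flattens keyword arguments into a positional tuple ordered by find_split_args_defaults. A minimal, self-contained sketch of that pattern (the names and defaults here are hypothetical):

# Hypothetical (name, default) pairs, in the positional order that
# the compiled split finder expects
args_defaults = [('min_height', 0.0), ('min_ratio', 0.0)]

def build_args(defaults, **kwargs):
    # Take the caller's value when given, else fall back to the default
    return tuple(kwargs.get(k, v) for k, v in defaults)

assert build_args(args_defaults, min_ratio=0.5) == (0.0, 0.5)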
Code example #4
File: nveto_hitlets.py Project: tunnell/straxen
    def compute(self, records_nv, start, end):
        # Search again for hits in records:
        hits = strax.find_hits(
            records_nv, min_amplitude=self.config['hit_min_amplitude_nv'])

        # Merge/concatenate overlapping hits within a channel. This is
        # important in case hits were split by record boundaries. If we
        # accidentally concatenate two PMT signals, we split them again later.
        hits = strax.concat_overlapping_hits(
            hits, self.config['save_outside_hits_nv'],
            self.config['channel_map']['nveto'], start, end)
        hits = strax.sort_by_time(hits)

        # Now convert hits into temp_hitlets including the data field:
        if len(hits):
            nsamples = hits['length'].max()
        else:
            nsamples = 0
        temp_hitlets = np.zeros(
            len(hits), strax.hitlet_with_data_dtype(n_samples=nsamples))

        # Generating hitlets and copying relevant information from hits to hitlets.
        # These hitlets are not stored in the end since this array also contains a data
        # field which we will drop later.
        strax.refresh_hit_to_hitlets(hits, temp_hitlets)
        del hits

        # Get hitlet data and split hitlets:
        strax.get_hitlets_data(temp_hitlets, records_nv, to_pe=self.to_pe)

        temp_hitlets = strax.split_peaks(
            temp_hitlets,
            records_nv,
            self.to_pe,
            data_type='hitlets',
            algorithm='local_minimum',
            min_height=self.config['min_split_nv'],
            min_ratio=self.config['min_split_ratio_nv'])

        # Compute other hitlet properties:
        # We have to loop here 3 times over all hitlets...
        strax.hitlet_properties(temp_hitlets)
        entropy = strax.conditional_entropy(temp_hitlets,
                                            template='flat',
                                            square_data=False)
        temp_hitlets['entropy'][:] = entropy
        strax.compute_widths(temp_hitlets)

        # Remove data field:
        hitlets = np.zeros(len(temp_hitlets), dtype=strax.hitlet_dtype())
        drop_data_field(temp_hitlets, hitlets)

        return hitlets
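drop_data_field is defined elsewhere in straxen; a hedged sketch of what such a field-dropping copy amounts to in plain numpy. Copying every field shared by the two structured dtypes leaves the wide 'data' field behind, since plain hitlet_dtype() lacks it (copy_shared_fields is a hypothetical name):

def copy_shared_fields(source, target):
    # Copy each field present in both structured arrays; fields that
    # exist only in the source (e.g. 'data') are simply dropped
    for name in target.dtype.names:
        if name in source.dtype.names:
            target[name] = source[name]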
Code example #5
def test_compute_widths(peak_length, data_length, n_widths):
    """
    Test strax.compute_widths
    """
    peaks = get_filled_peaks(peak_length, data_length, n_widths)

    # Make a copy of peaks to test that they don't remain the same later
    pre_peaks = peaks.copy()
    strax.compute_widths(peaks)

    assert len(pre_peaks) == len(peaks), "Lost peaks"
    if np.sum(peaks['area'] > 0) > 10:
        mess = ("Highly unlikely that from at least 10 positive area peaks "
                "none were able to compute the width")
        assert np.any(peaks['width'] != pre_peaks['width']), mess
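For reference, a minimal standalone sketch of calling strax.compute_widths, assuming only strax.peak_dtype() (also used in the overflow test below) and that widths are written in place; the toy waveform values are made up:

import numpy as np
import strax

peaks = np.zeros(1, dtype=strax.peak_dtype())
peaks['dt'] = 10                          # ns per sample
peaks['length'] = 5
peaks['data'][0, :5] = [1, 3, 9, 3, 1]    # toy waveform
peaks['area'] = peaks['data'].sum(axis=1)
strax.compute_widths(peaks)               # fills peaks['width'] in place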
Code example #6
    def compute(self, peaklets, lone_hits):
        if self.config['merge_without_s1']:
            peaklets = peaklets[peaklets['type'] != 1]

        if len(peaklets) <= 1:
            return np.zeros(0, dtype=peaklets.dtype)

        gap_thresholds = self.config['s2_merge_gap_thresholds']
        max_gap = gap_thresholds[0][1]
        max_area = 10**gap_thresholds[-1][0]

        if max_gap < 0:
            # Do not merge at all
            return np.zeros(0, dtype=peaklets.dtype)
        else:
            # Max gap and area should be set by the gap thresholds
            # to avoid contradictions
            start_merge_at, end_merge_at = self.get_merge_instructions(
                peaklets['time'],
                strax.endtime(peaklets),
                areas=peaklets['area'],
                types=peaklets['type'],
                gap_thresholds=gap_thresholds,
                max_duration=self.config['s2_merge_max_duration'],
                max_gap=max_gap,
                max_area=max_area,
            )
            merged_s2s = strax.merge_peaks(
                peaklets,
                start_merge_at,
                end_merge_at,
                max_buffer=int(self.config['s2_merge_max_duration'] //
                               np.gcd.reduce(peaklets['dt'])),
            )
            merged_s2s['type'] = 2

            # Update time and length of lone_hits and sort again:
            lh = np.copy(lone_hits)
            del lone_hits
            lh_time_shift = (lh['left'] - lh['left_integration']) * lh['dt']
            lh['time'] = lh['time'] - lh_time_shift
            lh['length'] = (lh['right_integration'] - lh['left_integration'])
            lh = strax.sort_by_time(lh)
            strax.add_lone_hits(merged_s2s, lh, self.to_pe)

            strax.compute_widths(merged_s2s)

        return merged_s2s
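The indexing into s2_merge_gap_thresholds above implies a sequence of (log10(area), gap) pairs: the first pair holds the largest allowed gap and the last pair's log-area yields max_area. A hedged illustration with made-up numbers:

gap_thresholds = ((1.7, 26_500), (4.0, 0))  # (log10 area [PE], gap [ns])
max_gap = gap_thresholds[0][1]              # 26_500 ns
max_area = 10 ** gap_thresholds[-1][0]      # 10_000 PE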
Code example #7
File: plugins.py Project: XeBoris/straxen
    def compute(self, records):
        r = records
        hits = strax.find_hits(r)  # TODO: Duplicate work
        hits = strax.sort_by_time(hits)

        peaks = strax.find_peaks(hits, to_pe, result_dtype=self.dtype)
        strax.sum_waveform(peaks, r, to_pe)

        peaks = strax.split_peaks(peaks, r, to_pe)

        strax.compute_widths(peaks)

        if self.config['diagnose_sorting']:
            assert np.diff(r['time']).min() >= 0, "Records not sorted"
            assert np.diff(hits['time']).min() >= 0, "Hits not sorted"
            assert np.all(peaks['time'][1:] >= strax.endtime(peaks)[:-1]
                          ), "Peaks not disjoint"

        return peaks
Code example #8
    def compute(self, peaklets):
        if not len(peaklets):
            return peaklets[:0]

        if self.config['s2_merge_max_gap'] < 0:
            # Do not merge at all
            merged_s2s = np.zeros(0, dtype=peaklets.dtype)
        else:
            # Find all groups of peaklets separated by < the gap
            cluster_starts, cluster_stops = strax.find_peak_groups(
                peaklets, self.config['s2_merge_max_gap'])

            start_merge_at, end_merge_at = self.get_merge_instructions(
                peaklets['time'],
                strax.endtime(peaklets),
                areas=peaklets['area'],
                types=peaklets['type'],
                cluster_starts=cluster_starts,
                cluster_stops=cluster_stops,
                max_duration=self.config['s2_merge_max_duration'],
                max_area=self.config['s2_merge_max_area'])

            merged_s2s = strax.merge_peaks(
                peaklets,
                start_merge_at,
                end_merge_at,
                max_buffer=int(self.config['s2_merge_max_duration'] //
                               peaklets['dt'].min()))
            merged_s2s['type'] = 2
            strax.compute_widths(merged_s2s)

        if len(merged_s2s) == 0:
            # Strax does not handle the case of no merged S2s well.
            # If there are none in the entire dataset, it will just keep
            # waiting in Peaks forever. Hence this ugly hack: pass a
            # single fake merged S2 in the middle of the chunk and
            # remove it again later.
            merged_s2s = np.zeros(1, merged_s2s.dtype)
            q = merged_s2s[0]
            q['type'] = FAKE_MERGED_S2_TYPE
            q['time'] = (peaklets[0]['time'] + strax.endtime(peaklets[0])) / 2
            q['dt'] = 1
        return merged_s2s
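The placeholder has to be removed again downstream; where exactly that happens is not shown here, but a hedged sketch of the filtering step (reusing FAKE_MERGED_S2_TYPE from the module above; the filtering location is an assumption) could look like:

# Drop the fake placeholder before using the merged S2s further
real_s2s = merged_s2s[merged_s2s['type'] != FAKE_MERGED_S2_TYPE]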
Code example #9
    def compute(self, records):
        r = records

        hits = strax.find_hits(r)

        # Remove hits in zero-gain channels;
        # they should not affect the clustering!
        hits = hits[self.to_pe[hits['channel']] != 0]

        hits = strax.sort_by_time(hits)

        peaks = strax.find_peaks(
            hits,
            self.to_pe,
            gap_threshold=self.config['peak_gap_threshold'],
            left_extension=self.config['peak_left_extension'],
            right_extension=self.config['peak_right_extension'],
            min_channels=self.config['peak_min_pmts'],
            result_dtype=self.dtype)
        strax.sum_waveform(peaks, r, self.to_pe)

        peaks = strax.split_peaks(
            peaks,
            r,
            self.to_pe,
            min_height=self.config['peak_split_min_height'],
            min_ratio=self.config['peak_split_min_ratio'])

        strax.compute_widths(peaks)

        if self.config['diagnose_sorting']:
            assert np.diff(r['time']).min() >= 0, "Records not sorted"
            assert np.diff(hits['time']).min() >= 0, "Hits not sorted"
            assert np.all(peaks['time'][1:] >= strax.endtime(peaks)[:-1]
                          ), "Peaks not disjoint"

        return peaks
Code example #10
    def compute(self, records, start, end):
        r = records

        hits = strax.find_hits(r, min_amplitude=self.hit_thresholds)

        # Remove hits in zero-gain channels;
        # they should not affect the clustering!
        hits = hits[self.to_pe[hits['channel']] != 0]

        hits = strax.sort_by_time(hits)

        # Use peaklet gap threshold for initial clustering
        # based on gaps between hits
        peaklets = strax.find_peaks(
            hits,
            self.to_pe,
            gap_threshold=self.config['peaklet_gap_threshold'],
            left_extension=self.config['peak_left_extension'],
            right_extension=self.config['peak_right_extension'],
            min_channels=self.config['peak_min_pmts'],
            result_dtype=self.dtype_for('peaklets'),
            max_duration=self.config['peaklet_max_duration'],
        )

        # Make sure peaklets don't extend out of the chunk boundary
        # This should be very rare in normal data due to the ADC pretrigger
        # window.
        self.clip_peaklet_times(peaklets, start, end)

        # Get hits outside peaklets, and store them separately.
        # fully_contained is OK provided gap_threshold > extension,
        # which is asserted inside strax.find_peaks.
        is_lone_hit = strax.fully_contained_in(hits, peaklets) == -1
        lone_hits = hits[is_lone_hit]
        strax.integrate_lone_hits(
            lone_hits,
            records,
            peaklets,
            save_outside_hits=(self.config['peak_left_extension'],
                               self.config['peak_right_extension']),
            n_channels=len(self.to_pe))

        # Compute basic peak properties -- needed before natural breaks
        hits = hits[~is_lone_hit]
        # Define regions outside of peaks so that
        # _find_hit_integration_bounds does not extend hits beyond a peak.
        outside_peaks = self.create_outside_peaks_region(peaklets, start, end)
        strax.find_hit_integration_bounds(
            hits,
            outside_peaks,
            records,
            save_outside_hits=(self.config['peak_left_extension'],
                               self.config['peak_right_extension']),
            n_channels=len(self.to_pe),
            allow_bounds_beyond_records=True,
        )

        # Transform hits to hitlets for naming conventions: a hit refers
        # to the central part above threshold; a hitlet to the entire
        # signal including the left and right extension.
        # (We are not going to use the actual hitlet data_type here.)
        hitlets = hits
        del hits

        hitlet_time_shift = (hitlets['left'] -
                             hitlets['left_integration']) * hitlets['dt']
        hitlets['time'] = hitlets['time'] - hitlet_time_shift
        hitlets['length'] = (hitlets['right_integration'] -
                             hitlets['left_integration'])
        hitlets = strax.sort_by_time(hitlets)
        rlinks = strax.record_links(records)

        strax.sum_waveform(peaklets, hitlets, r, rlinks, self.to_pe)

        strax.compute_widths(peaklets)

        # Split peaks using low-split natural breaks;
        # see https://github.com/XENONnT/straxen/pull/45
        # and https://github.com/AxFoundation/strax/pull/225
        peaklets = strax.split_peaks(
            peaklets,
            hitlets,
            r,
            rlinks,
            self.to_pe,
            algorithm='natural_breaks',
            threshold=self.natural_breaks_threshold,
            split_low=True,
            filter_wing_width=self.config['peak_split_filter_wing_width'],
            min_area=self.config['peak_split_min_area'],
            do_iterations=self.config['peak_split_iterations'])

        # Saturation correction using non-saturated channels
        # similar method used in pax
        # see https://github.com/XENON1T/pax/pull/712
        # In some cases records is not writeable, for unclear reasons;
        # we only see this when loading 1T test data. More details:
        # https://numpy.org/doc/stable/reference/generated/numpy.ndarray.flags.html
        if not r['data'].flags.writeable:
            r = r.copy()

        if self.config['saturation_correction_on']:
            peak_list = peak_saturation_correction(
                r,
                rlinks,
                peaklets,
                hitlets,
                self.to_pe,
                reference_length=self.config['saturation_reference_length'],
                min_reference_length=self.config[
                    'saturation_min_reference_length'])

            # Compute the width again for corrected peaks
            strax.compute_widths(peaklets, select_peaks_indices=peak_list)

        # Compute tight coincidence level.
        # Making this a separate plugin would
        # (a) require doing hitfinding yet again (or storing hits) and
        # (b) increase strax memory usage / max_messages,
        #     possibly due to strax's currently primitive scheduling.
        hit_max_times = np.sort(
            hitlets['time'] +
            hitlets['dt'] * hit_max_sample(records, hitlets) +
            hitlet_time_shift  # add time shift again to get correct maximum
        )
        peaklet_max_times = (
            peaklets['time'] +
            np.argmax(peaklets['data'], axis=1) * peaklets['dt'])
        tight_coincidence_channel = get_tight_coin(
            hit_max_times, hitlets['channel'], peaklet_max_times,
            self.config['tight_coincidence_window_left'],
            self.config['tight_coincidence_window_right'], self.channel_range)

        peaklets['tight_coincidence'] = tight_coincidence_channel

        if self.config['diagnose_sorting'] and len(r):
            assert np.diff(r['time']).min(initial=1) >= 0, "Records not sorted"
            assert np.diff(
                hitlets['time']).min(initial=1) >= 0, "Hits/Hitlets not sorted"
            assert np.all(peaklets['time'][1:] >= strax.endtime(peaklets)[:-1]
                          ), "Peaks not disjoint"

        # Update n_hits of peaklets:
        counts = strax.touching_windows(hitlets, peaklets)
        counts = np.diff(counts, axis=1).flatten()
        peaklets['n_hits'] = counts

        return dict(peaklets=peaklets, lone_hits=lone_hits)
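The n_hits bookkeeping at the end relies on strax.touching_windows returning, per peaklet, the half-open [start, stop) index range of touching hitlets, so the count per peaklet is stop - start. A small sketch of that shape contract with illustrative values:

import numpy as np

windows = np.array([[0, 3],   # peaklet 0 touches hitlets 0..2
                    [3, 7]])  # peaklet 1 touches hitlets 3..6
n_hits = np.diff(windows, axis=1).flatten()  # -> array([3, 4])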
Code example #11
    def compute(self, records, start, end):
        r = records

        hits = strax.find_hits(r,
                               min_amplitude=straxen.hit_min_amplitude(
                                   self.config['hit_min_amplitude']))

        # Remove hits in zero-gain channels;
        # they should not affect the clustering!
        hits = hits[self.to_pe[hits['channel']] != 0]

        hits = strax.sort_by_time(hits)

        # Use peaklet gap threshold for initial clustering
        # based on gaps between hits
        peaklets = strax.find_peaks(
            hits,
            self.to_pe,
            gap_threshold=self.config['peaklet_gap_threshold'],
            left_extension=self.config['peak_left_extension'],
            right_extension=self.config['peak_right_extension'],
            min_channels=self.config['peak_min_pmts'],
            result_dtype=self.dtype_for('peaklets'))

        # Make sure peaklets don't extend out of the chunk boundary
        # This should be very rare in normal data due to the ADC pretrigger
        # window.
        self.clip_peaklet_times(peaklets, start, end)

        # Get hits outside peaklets, and store them separately.
        # fully_contained is OK provided gap_threshold > extension,
        # which is asserted inside strax.find_peaks.
        lone_hits = hits[strax.fully_contained_in(hits, peaklets) == -1]
        strax.integrate_lone_hits(
            lone_hits,
            records,
            peaklets,
            save_outside_hits=(self.config['peak_left_extension'],
                               self.config['peak_right_extension']),
            n_channels=len(self.to_pe))

        # Compute basic peak properties -- needed before natural breaks
        strax.sum_waveform(peaklets, r, self.to_pe)
        strax.compute_widths(peaklets)

        # Split peaks using low-split natural breaks;
        # see https://github.com/XENONnT/straxen/pull/45
        # and https://github.com/AxFoundation/strax/pull/225
        peaklets = strax.split_peaks(
            peaklets,
            r,
            self.to_pe,
            algorithm='natural_breaks',
            threshold=self.natural_breaks_threshold,
            split_low=True,
            filter_wing_width=self.config['peak_split_filter_wing_width'],
            min_area=self.config['peak_split_min_area'],
            do_iterations=self.config['peak_split_iterations'])

        # Saturation correction using non-saturated channels
        # similar method used in pax
        # see https://github.com/XENON1T/pax/pull/712
        if self.config['saturation_correction_on']:
            peak_saturation_correction(
                r,
                peaklets,
                self.to_pe,
                reference_length=self.config['saturation_reference_length'],
                min_reference_length=self.config[
                    'saturation_min_reference_length'])

        # Compute tight coincidence level.
        # Making this a separate plugin would
        # (a) require doing hitfinding yet again (or storing hits) and
        # (b) increase strax memory usage / max_messages,
        #     possibly due to strax's currently primitive scheduling.
        hit_max_times = np.sort(hits['time'] +
                                hits['dt'] * hit_max_sample(records, hits))
        peaklet_max_times = (
            peaklets['time'] +
            np.argmax(peaklets['data'], axis=1) * peaklets['dt'])
        peaklets['tight_coincidence'] = get_tight_coin(
            hit_max_times, peaklet_max_times,
            self.config['tight_coincidence_window_left'],
            self.config['tight_coincidence_window_right'])

        if self.config['diagnose_sorting'] and len(r):
            assert np.diff(r['time']).min(initial=1) >= 0, "Records not sorted"
            assert np.diff(hits['time']).min(initial=1) >= 0, "Hits not sorted"
            assert np.all(peaklets['time'][1:] >= strax.endtime(peaklets)[:-1]
                          ), "Peaks not disjoint"

        # Update n_hits of peaklets:
        counts = strax.touching_windows(hits, peaklets)
        counts = np.diff(counts, axis=1).flatten()
        counts += 1
        peaklets['n_hits'] = counts

        return dict(peaklets=peaklets, lone_hits=lone_hits)
Code example #12
def test_peak_overflow(
    records,
    gap_factor,
    right_extension,
    gap_threshold,
    max_duration,
):
    """
    Test that we handle dt overflows in peaks correctly. To this end, we
        just create some sets of records and copy that set of records
        for a few times. That way we may end up with a very long
        artificial set of hits that can be used in the peak building. By
        setting the peak finding parameters to very strange conditions
        we are able to replicate the behaviour where a peak would become
        so large that it cannot be written out correctly due to integer
        overflow of the dt field,
    :param records: records
    :param gap_factor: to create very extended sets of records, just
        add a factor that can be used to multiply the time field with,
        to more quickly arrive to a very long pulse-train
    :param max_duration: max_duration option for strax.find_peaks
    :param right_extension: option for strax.find_peaks
    :param gap_threshold: option for strax.find_peaks
    :return: None
    """

    # Set this here, no need to test left and right independently
    left_extension = 0
    # Make a single big peak to contain all the records
    peak_dtype = np.zeros(0, strax.peak_dtype()).dtype
    # NB! This bound dates from before #403: peaks' dt is now int32,
    # and this test would take forever if it had to overflow int32.
    magic_overflow_time = np.iinfo(np.int16).max * peak_dtype['data'].shape[0]

    def return_1(x):
        """
        Return an array of ones; it can be used as a trivial threshold
            function for the natural-breaks splitting
        :param x: any array-like input
        :return: np.ones(len(x))
        """
        return np.ones(len(x))

    r = records
    if not len(r) or len(np.unique(r['channel'])) == 1:
        # Hard to test integer overflow for empty records or with
        # records only from a single channel
        return

    # Copy the pulse train of the records. We are going to copy the same
    # set of records many times now.
    t_max = strax.endtime(r).max()
    print('make buffer')
    n_repeat = int(1.5 * magic_overflow_time + t_max * gap_factor) // int(
        t_max * gap_factor) + 1
    time_offset = np.linspace(0,
                              1.5 * magic_overflow_time + t_max * gap_factor,
                              n_repeat,
                              dtype=np.int64)
    r_buffer = np.tile(r, n_repeat // len(r) + 1)[:len(time_offset)]
    assert len(r_buffer) == len(time_offset)
    r_buffer['time'] = r_buffer['time'] + time_offset
    assert strax.endtime(
        r_buffer[-1]) - r_buffer['time'].min() > magic_overflow_time
    r = r_buffer.copy()
    del r_buffer
    print(f'Array is {r.nbytes/1e6} MB, good luck')

    # Do peak finding!
    print(f'Find hits')
    hits = strax.find_hits(r, min_amplitude=0)
    assert len(hits)
    hits = strax.sort_by_time(hits)

    # Dummy to_pe
    to_pe = np.ones(max(r['channel']) + 1)

    try:
        print('Find peaks')
        # Find peaks, we might end up with negative dt here!
        p = strax.find_peaks(
            hits,
            to_pe,
            gap_threshold=gap_threshold,
            left_extension=left_extension,
            right_extension=right_extension,
            max_duration=max_duration,
            # Due to these settings, we will start merging
            # whatever strax can get its hands on
            min_area=0.,
            min_channels=1,
        )
    except AssertionError as e:
        if not gap_threshold > left_extension + right_extension:
            print(f'Great, we are getting the assertion statement for the '
                  f'incongruent extensions')
            return
        elif not left_extension + max_duration + right_extension < magic_overflow_time:
            # Ending up here is the ultimate goal of the tests. This
            # means we are hitting github.com/AxFoundation/strax/issues/397
            print(f'Great, the test worked, we are getting the assertion '
                  f'statement for the int overflow')
            return
        else:
            # The error is caused by something else, we need to re-raise
            raise e

    print(f'Peaklet array is {p.nbytes / 1e6} MB, good luck')
    if len(p) == 0:
        print(f'rec length {len(r)}')
    assert len(p)
    assert np.all(p['dt'] > 0)

    # Double check that this error should have been raised.
    if not gap_threshold > left_extension + right_extension:
        raise ValueError(f'No assertion error raised! Working with '
                         f'{gap_threshold} {left_extension + right_extension}')

    # Compute basics
    hits = strax.find_hits(r, np.ones(10000))
    hits['left_integration'] = hits['left']
    hits['right_integration'] = hits['right']
    rlinks = strax.record_links(r)
    strax.sum_waveform(p, hits, r, rlinks, to_pe)
    strax.compute_widths(p)

    try:
        print('Split peaks')
        peaklets = strax.split_peaks(p,
                                     hits,
                                     r,
                                     rlinks,
                                     to_pe,
                                     algorithm='natural_breaks',
                                     threshold=return_1,
                                     split_low=True,
                                     filter_wing_width=70,
                                     min_area=0,
                                     do_iterations=2)
    except AssertionError as e:
        if not left_extension + max_duration + right_extension < magic_overflow_time:
            # Ending up here is the ultimate goal of the tests. This
            # means we are hitting github.com/AxFoundation/strax/issues/397
            print(f'Great, the test worked, we are getting the assertion '
                  f'statement for the int overflow')
            raise RuntimeError(
                'We were not properly warned of the imminent peril we are '
                'facing. This error means that the peak_finding is not '
                'protected against integer overflow in the dt field. Where is '
                'our white knight in shining armour to protect us from this '
                'imminent doom:\n'
                'github.com/AxFoundation/strax/issues/397') from e
        # We failed for another reason, we need to re-raise
        raise e

    assert len(peaklets)
    assert len(peaklets) <= len(r)
    # Integer overflow will manifest itself here again:
    assert np.all(peaklets['dt'] > 0)
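To make the failure mode concrete: strax downsamples long peaks so their waveform fits the fixed-size 'data' buffer, so dt grows with peak duration until (before #403) it no longer fit in int16. A hedged numeric sketch with illustrative numbers:

import numpy as np

n_samples = 200                      # illustrative 'data' buffer length
duration = 7_000_000                 # ns, a very long artificial peak
dt = duration // n_samples           # 35_000 ns per sample
assert dt > np.iinfo(np.int16).max   # 32_767 -> int16 dt would overflow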