Example 1
def test_nveto_event_building(hitlets,
                              coincidence):
    """
    Test the code of
    straxen.plugins.veto_events.find_veto_events.
    """
    hitlets = strax.sort_by_time(hitlets)

    event_intervals = straxen.plugins.nveto_recorder.find_coincidence(hitlets,
                                                                      coincidence,
                                                                      300)

    mes = 'Found overlapping events returned by "coincidence".'
    assert np.all(event_intervals['endtime'][:-1] - event_intervals['time'][1:] < 0), mes

    # Get hits which touch the event window, this can lead to ambiguities
    # which we will solve subsequently.
    hitlets_ids_in_event = strax.touching_windows(hitlets, event_intervals)
    # First check for empty events, since ambiguity check will merge intervals:
    mes = f'Found an empty event without any hitlets: {hitlets_ids_in_event}.'
    assert np.all(np.diff(hitlets_ids_in_event) != 0), mes

    # Solve ambiguities (merge overlapping intervals)
    interval_truth = _test_ambiguity(hitlets_ids_in_event)
    hitlets_ids_in_event = straxen.plugins.veto_events._solve_ambiguity(hitlets_ids_in_event)

    mes = f'Found ambiguous event for {hitlets_ids_in_event} with truth {interval_truth}'
    assert np.all(hitlets_ids_in_event == interval_truth), mes

    # Check if events satisfy the coincidence requirement:
    mes = f'Found an event with fewer than {coincidence} hitlets. {hitlets_ids_in_event}'
    assert np.all(np.diff(hitlets_ids_in_event) >= coincidence), mes
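The non-overlap assertion above only makes sense on time-sorted events; as a standalone illustration (toy values, not part of the test suite):

import numpy as np
import strax

events = np.zeros(3, dtype=strax.time_fields)
events['time'] = [0, 100, 200]
events['endtime'] = [50, 150, 250]

# For sorted, disjoint intervals every endtime precedes the next start:
assert np.all(events['endtime'][:-1] - events['time'][1:] < 0)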
Example 2
def test_time_selection(d, second_time, second_dt):
    """
    Test that both 'fully_contained' and 'touching' give the same
        results as 'strax.fully_contained_in' and
        'strax.touching_windows' respectively.
    :param d: test-data from get_dummy_data
    :param second_time: start time of the container
    :param second_dt: the offset of the container endtime w.r.t. second_time
    :return: None
    """
    container = np.zeros(1, dtype=strax.time_fields)
    container['time'] = second_time
    container['endtime'] = second_time + second_dt
    time_range = (second_time, second_time + second_dt)

    # Fully contained in
    selected_data = strax.apply_selection(d,
                                          time_range=time_range,
                                          time_selection='fully_contained')
    contained = strax.fully_contained_in(d, container)
    selected_data_fc = d[contained != -1]
    assert np.all(selected_data == selected_data_fc)

    # Touching windows
    selected_data = strax.apply_selection(d,
                                          time_range=time_range,
                                          time_selection='touching')
    windows = strax.touching_windows(d, container, window=0)
    assert np.diff(windows[0]) == len(selected_data)
    if len(windows) and len(selected_data):
        assert np.all(selected_data == d[windows[0][0]:windows[0][1]])
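A toy check (hypothetical values) of the 'fully_contained' semantics this test relies on: strax.apply_selection keeps exactly the entries whose full [time, endtime) range lies inside time_range.

import numpy as np
import strax

d = np.zeros(3, dtype=strax.time_fields)
d['time'] = [0, 10, 20]
d['endtime'] = [5, 15, 25]

# Only the middle entry lies fully inside (8, 18):
sel = strax.apply_selection(d, time_range=(8, 18),
                            time_selection='fully_contained')
assert np.all(sel == d[1:2])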
Example 3
def _plot_truth(data, start_end, t_range):
    plt.title('Instructions')
    for pk, pi in enumerate(
            range(*strax.touching_windows(data, start_end)[0])):
        tpeak = data[pi]
        hatch_cycle = ['/', '*', '+', '|']
        _t_range = tpeak[['time', 'endtime']]
        x = np.array(list(_t_range))
        y = tpeak['n_pe'] / np.diff(x)
        ct = tpeak['t_mean_photon']
        stype = tpeak['type']
        plt.fill_between([
            x[0] / 1e9,
            ct / 1e9,
            x[-1] / 1e9,
        ], [0, 0, 0], [0, 2 * y[0], 0],
                         color={
                             1: 'blue',
                             2: 'green',
                             0: 'gray',
                             6: 'orange',
                             4: 'purple',
                         }[stype],
                         label=f'Peak S{stype}. {tpeak["n_pe"]} PE',
                         alpha=0.4,
                         hatch=hatch_cycle[pk])
        plt.ylabel('Intensity [PE/ns]')
    for t in t_range:
        plt.axvline(t / 1e9, label=f't = {t}')

    plt.legend(loc='lower left', fontsize='x-small')
Example 4
    def compute_ambience(self, lone_hits, peaks, current_peak):
        # 1. Initialization
        result = np.zeros(len(current_peak), self.dtype)

        # 2. Define a time window for each peak; we will find small peaks & lone hits within these windows
        roi = np.zeros(len(current_peak), dtype=strax.time_fields)
        roi['time'] = current_peak['center_time'] - self.config[
            'ambience_time_window_backward']
        roi['endtime'] = current_peak['center_time']

        # 3. Calculate number and area sum of lone hits before a peak
        touching_windows = strax.touching_windows(lone_hits, roi)
        # Calculating ambience
        self.lonehits_ambience(current_peak, lone_hits, touching_windows,
                               result['n_lh_before'], result['s_lh_before'],
                               self.config['ambience_divide_t'])

        # 4. Calculate number and area sum of small S0, S1, S2 before a peak
        radius = -1
        for stype, area in zip([0, 1, 2],
                               self.config['ambience_area_parameters']):
            mask_pre = (peaks['type'] == stype) & (peaks['area'] < area)
            touching_windows = strax.touching_windows(peaks[mask_pre], roi)
            # Calculating ambience
            self.peaks_ambience(current_peak, peaks[mask_pre],
                                touching_windows, radius,
                                result[f'n_s{stype}_before'],
                                result[f's_s{stype}_before'],
                                self.config['ambience_divide_t'],
                                self.config['ambience_divide_r'])

        # 5. Calculate number and area sum of small S2s near (in (x, y) space) an S2 peak
        mask_pre = (peaks['type'] == 2) & (
            peaks['area'] < self.config['ambience_area_parameters'][2])
        touching_windows = strax.touching_windows(peaks[mask_pre], roi)
        # Calculating ambience
        self.peaks_ambience(current_peak, peaks[mask_pre], touching_windows,
                            self.config['ambient_radius'], result['n_s2_near'],
                            result['s_s2_near'],
                            self.config['ambience_divide_t'],
                            self.config['ambience_divide_r'])

        # 6. Set time and endtime for peaks
        result['time'] = current_peak['time']
        result['endtime'] = strax.endtime(current_peak)
        return result
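The helpers lonehits_ambience and peaks_ambience are not shown here; in the simplest case (a plain count, without the area sum or the 1/dt weighting controlled by ambience_divide_t) the answer falls straight out of the window bounds. A minimal sketch of that counting pattern:

import numpy as np
import strax

def count_in_roi(things, roi):
    # touching_windows returns one [start, end) index pair per ROI,
    # so the count of touching entries is simply end - start.
    windows = strax.touching_windows(things, roi)
    return np.diff(windows, axis=1).flatten()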
Example 5
    def set_result_for_veto(self, result_buffer: np.ndarray,
                            event_window: np.ndarray,
                            veto_intervals: np.ndarray,
                            veto_name: str) -> None:
        """
        Fill the result buffer in place. The goal is to find vetoes of
        <veto_name> that are either during, before, or after the
        current event_window.

        :param result_buffer: The buffer to fill inplace
        :param event_window: start/stop boundaries of the event to consider.
            Should be an array with ['time'] and ['endtime'] which can be
            based on event start/end times or S1/S2 times
        :param veto_intervals: veto intervals datatype
        :param veto_name: The name of the veto to fill the result buffer for
        :return: Nothing, results are filled in place
        """
        # Set defaults to be some very long time
        result_buffer[
            f'time_to_previous_{veto_name}'] = self.time_no_aqmon_veto_found
        result_buffer[
            f'time_to_next_{veto_name}'] = self.time_no_aqmon_veto_found

        selected_intervals = veto_intervals[veto_intervals['veto_type'] ==
                                            f'{veto_name}_veto']
        if not len(selected_intervals):
            return

        vetos_during_event = strax.touching_windows(selected_intervals,
                                                    event_window)

        # Figure out the vetos *during* an event
        for event_i, veto_window in enumerate(vetos_during_event):
            if veto_window[1] - veto_window[0]:
                vetos_in_window = selected_intervals[
                    veto_window[0]:veto_window[1]].copy()
                starts = np.clip(vetos_in_window['time'],
                                 event_window[event_i]['time'],
                                 event_window[event_i]['endtime'])
                stops = np.clip(vetos_in_window['endtime'],
                                event_window[event_i]['time'],
                                event_window[event_i]['endtime'])
                # Now sum over all the stops-starts that are clipped
                # within the duration of the event
                result_buffer[event_i][f'veto_{veto_name}_overlap'] = np.sum(
                    stops - starts)

        # Find the next and previous vetoes
        times_to_prev, times_to_next = self.abs_time_to_prev_next(
            event_window, selected_intervals)
        mask_prev = times_to_prev > 0
        result_buffer[f'time_to_previous_{veto_name}'][
            mask_prev] = times_to_prev[mask_prev]

        mask_next = times_to_next > 0
        result_buffer[f'time_to_next_{veto_name}'][mask_next] = times_to_next[
            mask_next]
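The clipping trick above generalizes: the overlap of an interval with a window is the difference of its clipped endpoints, and it vanishes when the interval lies entirely outside. A toy check (hypothetical numbers):

import numpy as np

window = (100, 200)
veto_start, veto_stop = 150, 300

# Clip both endpoints into the window; the clipped difference is the overlap:
overlap = np.clip(veto_stop, *window) - np.clip(veto_start, *window)
assert overlap == 50  # only the part from 150 to 200 is inside the window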
Example 6
def test_touching_windows(things, containers, window):
    result = strax.touching_windows(things, containers, window=window)
    assert len(result) == len(containers)
    if len(result):
        assert np.all((0 <= result) & (result <= len(things)))

    for c_i, container in enumerate(containers):
        i_that_touch = np.arange(*result[c_i])
        for t_i, thing in enumerate(things):
            if (strax.endtime(thing) <= container['time'] - window
                    or thing['time'] >= strax.endtime(container) + window):
                assert t_i not in i_that_touch
            else:
                assert t_i in i_that_touch
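For concreteness, a minimal standalone run of the property being tested (toy values):

import numpy as np
import strax

things = np.zeros(4, dtype=strax.time_fields)
things['time'] = [0, 100, 110, 500]
things['endtime'] = [50, 105, 120, 510]

containers = np.zeros(2, dtype=strax.time_fields)
containers['time'] = [90, 490]
containers['endtime'] = [130, 520]

# One [start, end) index pair into 'things' per container:
print(strax.touching_windows(things, containers, window=0))
# -> [[1 3], [3 4]]: things 1-2 touch the first container, thing 3 the second
print(strax.touching_windows(things, containers, window=50))
# -> [[0 3], [3 4]]: the extra margin of 50 pulls in thing 0 as well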
Example 7
def replace_merged(orig, merge):
    """Return sorted array of 'merge' and members of 'orig' that do not touch
    any of merge
    :param orig: Array of interval-like objects (e.g. peaks)
    :param merge: Array of interval-like objects (e.g. peaks)
    """
    if not len(merge):
        return orig

    skip_windows = strax.touching_windows(orig, merge)
    skip_n = np.diff(skip_windows, axis=1).sum()
    result = np.zeros(len(orig) - skip_n + len(merge), dtype=orig.dtype)
    _replace_merged(result, orig, merge, skip_windows)
    return result
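For reference, a pure-numpy sketch of the same semantics (no preallocation, not the numba-accelerated _replace_merged; assumes orig and merge share a dtype):

import numpy as np
import strax

def replace_merged_slow(orig, merge):
    # Drop every 'orig' entry that touches some 'merge' entry,
    # then add 'merge' and restore time ordering.
    skip = np.zeros(len(orig), dtype=bool)
    for left, right in strax.touching_windows(orig, merge):
        skip[left:right] = True
    return strax.sort_by_time(np.concatenate([orig[~skip], merge]))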
Example 8
def find_veto_events(
    hitlets: np.ndarray,
    coincidence_level: int,
    resolving_time: int,
    left_extension: int,
    event_number_key: str = 'event_number_nv',
    n_channel: int = 120,
) -> ty.Tuple[np.ndarray, np.ndarray]:
    """
    Function which finds the veto events as an n-fold coincidence within a
    given resolving time window. All hitlets which touch the event window
    contribute.

    :param hitlets: Hitlets which shall be used for event creation.
    :param coincidence_level: int, coincidence level.
    :param resolving_time: int, resolving window for coincidence in ns.
    :param left_extension: int, left event extension in ns.
    :param event_number_key: str, field name for the event number
    :param n_channel: int, number of channels in detector.
    :returns: events, hitlet_ids_per_event
    """
    # Find intervals which satisfy requirement:
    intervals = straxen.plugins.nveto_recorder.coincidence(
        hitlets,
        coincidence_level,
        resolving_time,
        left_extension,
    )

    # Create some preliminary events:
    event_intervals = np.zeros(len(intervals), dtype=strax.time_fields)
    event_intervals['time'] = intervals[:, 0]
    event_intervals['endtime'] = intervals[:, 1]

    # Find all hitlets which touch the coincidence windows:
    # (we cannot use fully_contained in here since some muon signals
    # may be larger than 300 ns)
    hitlets_ids_in_event = strax.touching_windows(hitlets, event_intervals)

    # For some rare cases long signals may touch two intervals, in that
    # case we merge the intervals in the subsequent function:
    hitlets_ids_in_event = _solve_ambiguity(hitlets_ids_in_event)

    # Now we can create the veto events:
    events = np.zeros(len(hitlets_ids_in_event),
                      dtype=veto_event_dtype(event_number_key, n_channel))
    _make_event(hitlets, hitlets_ids_in_event, events)
    return events, hitlets_ids_in_event
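_solve_ambiguity itself is not shown here; a minimal sketch of the merging it has to perform (overlapping [start, end) hitlet-index windows are fused into one), in plain python for clarity:

import numpy as np

def solve_ambiguity_sketch(hitlets_ids_in_event):
    # Fuse index windows that share hitlets, e.g. [[0, 3], [2, 5]] -> [[0, 5]].
    merged = []
    for start, end in hitlets_ids_in_event:
        if merged and start < merged[-1][1]:
            merged[-1][1] = max(merged[-1][1], end)
        else:
            merged.append([start, end])
    return np.array(merged, dtype=np.int64).reshape(-1, 2)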
Example 9
def test_create_outside_peaks_region(time):
    time = np.sort(time)
    time_intervals = np.zeros(len(time)//2, strax.time_dt_fields)
    time_intervals['time'] = time[::2]
    time_intervals['length'] = time[1::2] - time[::2]
    time_intervals['dt'] = 1

    st = straxen.contexts.demo()
    p = st.get_single_plugin('0', 'peaklets')
    outside = p.create_outside_peaks_region(time_intervals, 0, np.max(time))

    touching = strax.touching_windows(outside, time_intervals, window=0)

    for tw in touching:
        print(tw)
        assert np.diff(tw) == 0, 'Intervals overlap although they should not!'
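create_outside_peaks_region is not shown here; conceptually it returns the complement of sorted, disjoint intervals within [start, end). A sketch of that construction (output dtype assumed to be strax.time_fields):

import numpy as np
import strax

def outside_region_sketch(intervals, start, end):
    # Gaps run from each interval's end to the next interval's start,
    # padded with the outer boundaries.
    gap_starts = np.concatenate([[start], strax.endtime(intervals)])
    gap_ends = np.concatenate([intervals['time'], [end]])
    out = np.zeros(len(gap_starts), dtype=strax.time_fields)
    out['time'], out['endtime'] = gap_starts, gap_ends
    return out[out['endtime'] > out['time']]  # drop empty gaps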
Example 10
def get_deepwindows(windows, peaks_a, peaks_b, matching_fuzz):
    """Do it the non-numba way, should work as well but is slower"""
    _deep_windows = []
    for l1, r1 in windows:
        this_window = [-1, -1]
        if r1 - l1:
            match = strax.touching_windows(peaks_a,
                                           peaks_b[l1:r1],
                                           window=matching_fuzz)
            if len(match) > 0:
                this_window = match[0]
            else:
                pass
        _deep_windows.append(this_window)
    deep_windows = np.array(_deep_windows, dtype=(np.int64, np.int64))
    return deep_windows
Example 11
    def compute(self, truth, events):
        unique_numbers = np.unique(truth['event_number'])
        res = np.zeros(len(unique_numbers), self.dtype)
        res['truth_number'] = unique_numbers
        fill_start_end(truth, res)
        assert np.all(res['endtime'] > res['time'])
        assert np.all(np.diff(res['time']) > 0)

        tw = strax.touching_windows(events, res)
        tw_start = tw[:, 0]
        tw_end = tw[:, 1] - 1
        found = tw_end - tw_start > 0
        diff = np.diff(tw, axis=1)[:, 0]

        res['start_match'][found] = events[tw_start[found]]['event_number']
        res['end_match'][found] = events[tw_end[found]]['event_number']
        res['outcome'] = self.outcomes(diff)
        res['start_match'][~found] = pema.matching.INT_NAN
        res['end_match'][~found] = pema.matching.INT_NAN
        return res
Example 12
    def compute(self, peaks):
        windows = strax.touching_windows(peaks,
                                         peaks,
                                         window=self.config['nearby_window'])
        n_left, n_tot = self.find_n_competing(
            peaks, windows, fraction=self.config['min_area_fraction'])

        t_to_prev_peak = (np.ones(len(peaks), dtype=np.int64) *
                          self.config['peak_max_proximity_time'])
        t_to_prev_peak[1:] = peaks['time'][1:] - peaks['endtime'][:-1]

        t_to_next_peak = t_to_prev_peak.copy()
        t_to_next_peak[:-1] = peaks['time'][1:] - peaks['endtime'][:-1]

        return dict(n_competing=n_tot,
                    n_competing_left=n_left,
                    t_to_prev_peak=t_to_prev_peak,
                    t_to_next_peak=t_to_next_peak,
                    t_to_nearest_peak=np.minimum(t_to_prev_peak,
                                                 t_to_next_peak))
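The previous/next gap bookkeeping above is a single shifted difference over the time-sorted peaks, with peak_max_proximity_time as the boundary default. With toy numbers:

import numpy as np

time = np.array([0, 100, 250])
endtime = np.array([50, 160, 300])
max_gap = 10_000  # stand-in for peak_max_proximity_time

t_prev = np.full(3, max_gap, dtype=np.int64)
t_prev[1:] = time[1:] - endtime[:-1]   # -> [10000, 50, 90]
t_next = np.full(3, max_gap, dtype=np.int64)
t_next[:-1] = time[1:] - endtime[:-1]  # -> [50, 90, 10000]
assert np.all(np.minimum(t_prev, t_next) == [50, 50, 90])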
Example 13
def test_deepwindows(data_length, truth_length, max_duration, n_data_types,
                     n_truth_types, matching_fuzz):
    data, truth = _create_dummy_records(
        data_length,
        n_data_types,
        truth_length,
        n_truth_types,
        max_duration,
    )
    allpeaks1 = truth.copy()
    allpeaks2 = data.copy()

    windows = strax.touching_windows(allpeaks1,
                                     allpeaks2,
                                     window=matching_fuzz)
    deepwindows_simple = get_deepwindows(windows, allpeaks1, allpeaks2,
                                         matching_fuzz)
    deepwindows_numba = pema.matching.get_deepwindows(windows, allpeaks1,
                                                      allpeaks2, matching_fuzz)
    if len(deepwindows_simple) > 0 and len(deepwindows_numba) > 0:
        assert np.all(deepwindows_simple == deepwindows_numba)
    assert len(deepwindows_simple) == len(deepwindows_numba)
Example 14
def test_tag_peaks(peaks, veto_intervals):
    peaks_in_vetos = strax.touching_windows(peaks, veto_intervals)

    tags = np.zeros(len(peaks))
    straxen.plugins.peak_processing.tag_peaks(tags, peaks_in_vetos, 1)

    # Make an additional dummy array to test if function worked:
    dtype = []
    dtype += strax.time_dt_fields
    dtype += [(('peak tag', 'tag'), np.int8)]
    tagged_peaks = np.zeros(len(peaks), dtype)
    tagged_peaks['time'] = peaks['time']
    tagged_peaks['length'] = peaks['length']
    tagged_peaks['dt'] = 1
    tagged_peaks['tag'] = tags

    split_tagged_peaks = strax.split_touching_windows(tagged_peaks,
                                                      veto_intervals)

    for split_peaks in split_tagged_peaks:
        if not len(split_peaks):
            continue
        mes = f'Not all peaks were tagged properly {split_peaks}'
        assert np.all(split_peaks['tag'] == 1), mes
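A toy run of strax.split_touching_windows (hypothetical values), which returns one slice of the first argument per container:

import numpy as np
import strax

peaks = np.zeros(3, dtype=strax.time_fields)
peaks['time'] = [0, 100, 200]
peaks['endtime'] = [10, 110, 210]

vetos = np.zeros(2, dtype=strax.time_fields)
vetos['time'] = [90, 195]
vetos['endtime'] = [130, 215]

for split in strax.split_touching_windows(peaks, vetos):
    print(split[['time', 'endtime']])
# -> the peak at t=100 for the first veto, the peak at t=200 for the second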
Example 15
    def compute(self, records, start, end):
        r = records

        hits = strax.find_hits(r, min_amplitude=self.hit_thresholds)

        # Remove hits in zero-gain channels
        # they should not affect the clustering!
        hits = hits[self.to_pe[hits['channel']] != 0]

        hits = strax.sort_by_time(hits)

        # Use peaklet gap threshold for initial clustering
        # based on gaps between hits
        peaklets = strax.find_peaks(
            hits,
            self.to_pe,
            gap_threshold=self.config['peaklet_gap_threshold'],
            left_extension=self.config['peak_left_extension'],
            right_extension=self.config['peak_right_extension'],
            min_channels=self.config['peak_min_pmts'],
            result_dtype=self.dtype_for('peaklets'),
            max_duration=self.config['peaklet_max_duration'],
        )

        # Make sure peaklets don't extend out of the chunk boundary
        # This should be very rare in normal data due to the ADC pretrigger
        # window.
        self.clip_peaklet_times(peaklets, start, end)

        # Get hits outside peaklets, and store them separately.
        # fully_contained is OK provided gap_threshold > extension,
        # which is asserted inside strax.find_peaks.
        is_lone_hit = strax.fully_contained_in(hits, peaklets) == -1
        lone_hits = hits[is_lone_hit]
        strax.integrate_lone_hits(
            lone_hits,
            records,
            peaklets,
            save_outside_hits=(self.config['peak_left_extension'],
                               self.config['peak_right_extension']),
            n_channels=len(self.to_pe))

        # Compute basic peak properties -- needed before natural breaks
        hits = hits[~is_lone_hit]
        # Define regions outside of peaks so that _find_hit_integration_bounds
        # does not extend hits beyond a peak.
        outside_peaks = self.create_outside_peaks_region(peaklets, start, end)
        strax.find_hit_integration_bounds(
            hits,
            outside_peaks,
            records,
            save_outside_hits=(self.config['peak_left_extension'],
                               self.config['peak_right_extension']),
            n_channels=len(self.to_pe),
            allow_bounds_beyond_records=True,
        )

        # Transform hits to hitlets for naming conventions. A hit refers
        # to the central part above threshold; a hitlet to the entire signal
        # including the left and right extension.
        # (We are not going to use the actual hitlet data_type here.)
        hitlets = hits
        del hits

        hitlet_time_shift = (hitlets['left'] -
                             hitlets['left_integration']) * hitlets['dt']
        hitlets['time'] = hitlets['time'] - hitlet_time_shift
        hitlets['length'] = (hitlets['right_integration'] -
                             hitlets['left_integration'])
        hitlets = strax.sort_by_time(hitlets)
        rlinks = strax.record_links(records)

        strax.sum_waveform(peaklets, hitlets, r, rlinks, self.to_pe)

        strax.compute_widths(peaklets)

        # Split peaks using low-split natural breaks;
        # see https://github.com/XENONnT/straxen/pull/45
        # and https://github.com/AxFoundation/strax/pull/225
        peaklets = strax.split_peaks(
            peaklets,
            hitlets,
            r,
            rlinks,
            self.to_pe,
            algorithm='natural_breaks',
            threshold=self.natural_breaks_threshold,
            split_low=True,
            filter_wing_width=self.config['peak_split_filter_wing_width'],
            min_area=self.config['peak_split_min_area'],
            do_iterations=self.config['peak_split_iterations'])

        # Saturation correction using non-saturated channels
        # similar method used in pax
        # see https://github.com/XENON1T/pax/pull/712
        # In some cases records is not writeable, for unclear reasons;
        # we have only seen this when loading 1T test data. More details at
        # https://numpy.org/doc/stable/reference/generated/numpy.ndarray.flags.html
        if not r['data'].flags.writeable:
            r = r.copy()

        if self.config['saturation_correction_on']:
            peak_list = peak_saturation_correction(
                r,
                rlinks,
                peaklets,
                hitlets,
                self.to_pe,
                reference_length=self.config['saturation_reference_length'],
                min_reference_length=self.config[
                    'saturation_min_reference_length'])

            # Compute the width again for corrected peaks
            strax.compute_widths(peaklets, select_peaks_indices=peak_list)

        # Compute tight coincidence level.
        # Making this a separate plugin would
        # (a) require doing hitfinding yet again (or storing hits) and
        # (b) increase strax memory usage / max_messages,
        #     possibly due to its currently primitive scheduling.
        hit_max_times = np.sort(
            hitlets['time'] +
            hitlets['dt'] * hit_max_sample(records, hitlets) +
            hitlet_time_shift  # add time shift again to get correct maximum
        )
        peaklet_max_times = (
            peaklets['time'] +
            np.argmax(peaklets['data'], axis=1) * peaklets['dt'])
        tight_coincidence_channel = get_tight_coin(
            hit_max_times, hitlets['channel'], peaklet_max_times,
            self.config['tight_coincidence_window_left'],
            self.config['tight_coincidence_window_right'], self.channel_range)

        peaklets['tight_coincidence'] = tight_coincidence_channel

        if self.config['diagnose_sorting'] and len(r):
            assert np.diff(r['time']).min(initial=1) >= 0, "Records not sorted"
            assert np.diff(
                hitlets['time']).min(initial=1) >= 0, "Hits/Hitlets not sorted"
            assert np.all(peaklets['time'][1:] >= strax.endtime(peaklets)[:-1]
                          ), "Peaks not disjoint"

        # Update nhits of peaklets:
        counts = strax.touching_windows(hitlets, peaklets)
        counts = np.diff(counts, axis=1).flatten()
        peaklets['n_hits'] = counts

        return dict(peaklets=peaklets, lone_hits=lone_hits)
Example 16
def match_peaks(allpeaks1, allpeaks2, matching_fuzz=0, unknown_types=(0, )):
    """
    Perform peak matching between two numpy record arrays with fields:
        time, endtime (or dt and length), id, type, area
    If a peak is split into many fragments (e.g. two close peaks split
    into three peaks), the results are unreliable and depend on which
    peak set is peaks1 and which is peaks2.

    Returns (allpeaks1, allpeaks2), each with two extra fields:
    outcome, matched_to:
        outcome: Can be one of:
            found: Peak was matched 1-1 between peaks1 and peaks2 (type
                agrees, no other peaks in range). Note that area, widths,
                etc. can still be quite different!
            missed: Peak is not present in the other list
            misid_as_XX: Peak is present in the other list, but has type XX
            merged: Peak is merged with another peak in the other list, the new
                'super-peak' has the same type
            merged_to_XX: As above, but 'super-peak' has type XX
            split: Peak is split in the other list, but more than one fragment
                has the same type as the parent.
            chopped: As split, but one or several fragments are unclassified
                and exactly one has the correct type.
            split_and_unclassified: As split, but all fragments are unclassified
                in the other list.
            split_and_misid: As split, but at least one fragment has a different
                peak type.
        matched_to: id of the matching peak in the other list if outcome is found
            or misid_as_XX, INT_NAN otherwise.
    """
    # Check required fields
    for i, d in enumerate((allpeaks1, allpeaks2)):
        assert hasattr(d, 'dtype'), 'Cannot work with non-numpy arrays'
        m = ''
        for k in ('area', 'type', 'id'):
            if k not in d.dtype.names:
                m += f'Argument {i} misses field {k} required for matching \n'
        if m != '':
            raise ValueError(m)
    log.debug('Appending fields')
    # Append id, outcome and matched_to fields
    allpeaks1 = append_fields(
        allpeaks1,
        ('outcome', 'matched_to'),
        (np.array(['missed'] * len(allpeaks1), dtype=OUTCOME_DTYPE),
         INT_NAN * np.ones(len(allpeaks1), dtype=np.int64)),
        dtypes=(OUTCOME_DTYPE, np.int64),
    )
    allpeaks2 = append_fields(
        allpeaks2,
        ('outcome', 'matched_to'),
        (np.array(['missed'] * len(allpeaks2), dtype=OUTCOME_DTYPE),
         INT_NAN * np.ones(len(allpeaks2), dtype=np.int64)),
        dtypes=(OUTCOME_DTYPE, np.int64),
    )

    log.debug('Getting windows')
    windows = strax.touching_windows(allpeaks1,
                                     allpeaks2,
                                     window=matching_fuzz)
    deep_windows = np.empty((0, 2), dtype=(np.int64, np.int64))
    # Each of the windows projects to a set of peaks in allpeaks2
    # belonging to allpeaks1. We also need to go the reverse way, which
    # I'm calling deep_windows below.
    if len(windows):
        # The order matters!! We matched allpeaks1->allpeaks2 so we now should match allpeaks2->allpeaks1
        deep_windows = get_deepwindows(windows, allpeaks2, allpeaks1,
                                       matching_fuzz)
        log.debug(
            f'Got {len(deep_windows)} deep windows and {len(windows)} windows')

    if not len(windows):
        # patch for empty data
        deep_windows = np.array([[-1, -1]], dtype=(np.int64, np.int64))
    assert np.shape(np.shape(deep_windows))[0] == 2, (
        f'deep_windows shape is wrong {np.shape(deep_windows)}\n{deep_windows}'
    )

    # make array for numba
    unknown_types = np.array(unknown_types)

    # Inner matching
    _match_peaks(allpeaks1, allpeaks2, windows, deep_windows, unknown_types)
    return allpeaks1, allpeaks2
Example 17
    def compute(self, records, start, end):
        r = records

        hits = strax.find_hits(r,
                               min_amplitude=straxen.hit_min_amplitude(
                                   self.config['hit_min_amplitude']))

        # Remove hits in zero-gain channels
        # they should not affect the clustering!
        hits = hits[self.to_pe[hits['channel']] != 0]

        hits = strax.sort_by_time(hits)

        # Use peaklet gap threshold for initial clustering
        # based on gaps between hits
        peaklets = strax.find_peaks(
            hits,
            self.to_pe,
            gap_threshold=self.config['peaklet_gap_threshold'],
            left_extension=self.config['peak_left_extension'],
            right_extension=self.config['peak_right_extension'],
            min_channels=self.config['peak_min_pmts'],
            result_dtype=self.dtype_for('peaklets'))

        # Make sure peaklets don't extend out of the chunk boundary
        # This should be very rare in normal data due to the ADC pretrigger
        # window.
        self.clip_peaklet_times(peaklets, start, end)

        # Get hits outside peaklets, and store them separately.
        # fully_contained is OK provided gap_threshold > extension,
        # which is asserted inside strax.find_peaks.
        lone_hits = hits[strax.fully_contained_in(hits, peaklets) == -1]
        strax.integrate_lone_hits(
            lone_hits,
            records,
            peaklets,
            save_outside_hits=(self.config['peak_left_extension'],
                               self.config['peak_right_extension']),
            n_channels=len(self.to_pe))

        # Compute basic peak properties -- needed before natural breaks
        strax.sum_waveform(peaklets, r, self.to_pe)
        strax.compute_widths(peaklets)

        # Split peaks using low-split natural breaks;
        # see https://github.com/XENONnT/straxen/pull/45
        # and https://github.com/AxFoundation/strax/pull/225
        peaklets = strax.split_peaks(
            peaklets,
            r,
            self.to_pe,
            algorithm='natural_breaks',
            threshold=self.natural_breaks_threshold,
            split_low=True,
            filter_wing_width=self.config['peak_split_filter_wing_width'],
            min_area=self.config['peak_split_min_area'],
            do_iterations=self.config['peak_split_iterations'])

        # Saturation correction using non-saturated channels
        # similar method used in pax
        # see https://github.com/XENON1T/pax/pull/712
        if self.config['saturation_correction_on']:
            peak_saturation_correction(
                r,
                peaklets,
                self.to_pe,
                reference_length=self.config['saturation_reference_length'],
                min_reference_length=self.config[
                    'saturation_min_reference_length'])

        # Compute tight coincidence level.
        # Making this a separate plugin would
        # (a) require doing hitfinding yet again (or storing hits) and
        # (b) increase strax memory usage / max_messages,
        #     possibly due to its currently primitive scheduling.
        hit_max_times = np.sort(hits['time'] +
                                hits['dt'] * hit_max_sample(records, hits))
        peaklet_max_times = (
            peaklets['time'] +
            np.argmax(peaklets['data'], axis=1) * peaklets['dt'])
        peaklets['tight_coincidence'] = get_tight_coin(
            hit_max_times, peaklet_max_times,
            self.config['tight_coincidence_window_left'],
            self.config['tight_coincidence_window_right'])

        if self.config['diagnose_sorting'] and len(r):
            assert np.diff(r['time']).min(initial=1) >= 0, "Records not sorted"
            assert np.diff(hits['time']).min(initial=1) >= 0, "Hits not sorted"
            assert np.all(peaklets['time'][1:] >= strax.endtime(peaklets)[:-1]
                          ), "Peaks not disjoint"

        # Update nhits of peaklets:
        counts = strax.touching_windows(hits, peaklets)
        counts = np.diff(counts, axis=1).flatten()
        counts += 1
        peaklets['n_hits'] = counts

        return dict(peaklets=peaklets, lone_hits=lone_hits)
Example 18
    def compute_shadow(self, peaks, current_peak):
        # 1. Define a time window for each peak; we will find previous peaks within these windows
        roi_shadow = np.zeros(len(current_peak), dtype=strax.time_fields)
        roi_shadow['time'] = current_peak['center_time'] - self.config[
            'shadow_time_window_backward']
        roi_shadow['endtime'] = current_peak['center_time']

        # 2. Calculate S2 position shadow, S2 time shadow, and S1 time shadow
        result = np.zeros(len(current_peak), self.dtype)
        for key in ['s2_position_shadow', 's2_time_shadow', 's1_time_shadow']:
            is_position = 'position' in key
            type_str = key.split('_')[0]
            stype = 2 if 's2' in key else 1
            mask_pre = (peaks['type'] == stype) & (
                peaks['area'] > self.config['shadow_threshold'][key])
            split_peaks = strax.touching_windows(peaks[mask_pre], roi_shadow)
            array = np.zeros(len(current_peak), np.dtype(self.shadowdtype))

            # Initialization
            array['x'] = np.nan
            array['y'] = np.nan
            array['dt'] = self.config['shadow_time_window_backward']
            # The default value for shadow is set to be the lowest possible value
            if 'time' in key:
                array['shadow'] = self.config['shadow_threshold'][key] * array[
                    'dt']**self.config['shadow_deltatime_exponent']
            else:
                array['shadow'] = 0
            array['nearest_dt'] = self.config['shadow_time_window_backward']

            # Calculate the shadow, the core task of this plugin. Only record the previous peak casting the largest shadow
            if len(current_peak):
                self.peaks_shadow(
                    current_peak, peaks[mask_pre], split_peaks,
                    self.config['shadow_deltatime_exponent'], array,
                    is_position,
                    self.getsigma(self.config['shadow_sigma_and_baseline'],
                                  current_peak['area']))

            # Fill results
            names = ['shadow', 'dt']
            if 's2' in key:  # Only previous S2 peaks have (x,y)
                names += ['x', 'y']
            if 'time' in key:  # Only time shadow gives the nearest large peak
                names += ['nearest_dt']
            for name in names:
                if name == 'nearest_dt':
                    result[f'{name}_{type_str}'] = array[name]
                else:
                    result[f'{name}_{key}'] = array[name]

        distance = np.sqrt(
            (result['x_s2_position_shadow'] - current_peak['x'])**2 +
            (result['y_s2_position_shadow'] - current_peak['y'])**2)
        # If distance is NaN, set it to the largest possible distance
        distance = np.where(np.isnan(distance), 2 * straxen.tpc_r, distance)
        # HalfCauchy PDF when calculating S2 position shadow
        result['pdf_s2_position_shadow'] = halfcauchy.pdf(
            distance,
            scale=self.getsigma(self.config['shadow_sigma_and_baseline'],
                                current_peak['area']))

        # 3. Set time and endtime for peaks
        result['time'] = current_peak['time']
        result['endtime'] = strax.endtime(current_peak)
        return result
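For intuition on the quantities being filled here: the time shadow of a previous peak scales as its area times a power of the elapsed time, and the position correlation enters through a half-Cauchy PDF of the transverse distance. A back-of-the-envelope sketch (all numbers hypothetical; a negative exponent is assumed, so older peaks cast less shadow):

import numpy as np
from scipy.stats import halfcauchy

area_pre = 1e4    # PE, area of a previous large S2
dt = 1e6          # ns elapsed since that S2
exponent = -1.0   # stand-in for shadow_deltatime_exponent
time_shadow = area_pre * dt**exponent  # -> 0.01

distance = 5.0    # (x, y) distance to the previous S2, cm (hypothetical)
sigma = 15.0      # stand-in for getsigma(shadow_sigma_and_baseline, area)
pdf_weight = halfcauchy.pdf(distance, scale=sigma)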