Example #1
import numpy as np
import strax
# Assumed import path: _touching_windows is strax's internal numba helper
# behind strax.touching_windows; it may live elsewhere in your strax version.
from strax.processing.general import _touching_windows


def _get_hitlets_data(hitlets, records, to_pe):
    rranges = _touching_windows(records['time'], strax.endtime(records),
                                hitlets['time'], strax.endtime(hitlets))
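    # rranges[i] holds the (start, end) index range of records touching
    # hitlet i.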

    for i, h in enumerate(hitlets):
        recorded_samples_offset = 0
        n_recorded_samples = 0
        is_first_record = True
        for r_ind in range(rranges[i][0], rranges[i][1]):
            r = records[r_ind]
            if r['channel'] != h['channel']:
                continue

            # strax.overlap_indices returns the sample-index ranges over which
            # record and hitlet overlap, in units of their common dt.
            (r_start, r_end), (h_start, h_end) = strax.overlap_indices(
                r['time'] // r['dt'], r['length'], h['time'] // h['dt'],
                h['length'])

            if (r_end - r_start) == 0 and (h_end - h_start) == 0:
                # _touching_windows returns the range of records overlapping a
                # hitlet independent of channel. Hence, in rare cases a record
                # of channel A touches a hitlet of channel B which starts
                # before the previous record of channel B, and we get one
                # non-overlapping record in channel B.
                continue

            if is_first_record:
                # We need recorded_samples_offset because hits may extend
                # beyond the boundaries of our recorded data. As the data is
                # not defined in those regions we have to chop and realign our
                # data. Example: fragments 0 and 1 with samples [2, 2, 2, 2]
                # and [2, 2, 2], a hitfinder threshold of 1 and a left/right
                # extension of 3. In the first fragment the hitlet would range
                # from sample 3 to 8, in the second from 8 to 11. Hence we
                # subtract the offset of 3 from every h_start and h_end to
                # realign our data. Time and length of the hitlet are updated
                # accordingly.
                is_first_record = False
                recorded_samples_offset = h_start

            h_start -= recorded_samples_offset
            h_end -= recorded_samples_offset

            # Add back the fractional part of the float baseline; the record
            # 'data' holds integer ADC counts after baseline subtraction.
            h['data'][h_start:h_end] += (r['data'][r_start:r_end]
                                         + r['baseline'] % 1)
            n_recorded_samples += r_end - r_start

        # Chop time and length in case hit extends into non-recorded regions.
        h['time'] += int(recorded_samples_offset * h['dt'])
        h['length'] = n_recorded_samples

        # Convert from ADC counts to PE and compute the hitlet area.
        h['data'][:] = h['data'][:] * to_pe[h['channel']]
        h['area'] = np.sum(h['data'])
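
A minimal usage sketch (hedged: it assumes strax provides the dtype helpers
strax.record_dtype and strax.hitlet_with_data_dtype under these names; field
layouts may differ between strax versions):

import numpy as np
import strax

# Two 110-sample record fragments of one pulse in channel 0, dt = 10 ns.
records = np.zeros(2, dtype=strax.record_dtype(110))
records['dt'] = 10
records['length'] = 110
records['time'] = [0, 1100]
records['data'] = 2  # constant amplitude of 2 ADC counts

# One 20-sample hitlet in the same channel, overlapping both fragments.
hitlets = np.zeros(1, dtype=strax.hitlet_with_data_dtype(n_samples=20))
hitlets['dt'] = 10
hitlets['time'] = 1000
hitlets['length'] = 20

to_pe = np.ones(1, dtype=np.float32)  # trivial ADC-to-PE conversion
_get_hitlets_data(hitlets, records, to_pe)
print(hitlets['area'])  # summed waveform of the hitlet in PE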
Example #2
# Note: this example relies on the same imports as Example #1 (numpy as np,
# strax, _touching_windows) and on a helper _peak_saturation_correction_inner
# that is not shown here.
def peak_saturation_correction(
    records,
    rlinks,
    peaks,
    hitlets,
    to_pe,
    reference_length=100,
    min_reference_length=20,
    use_classification=False,
):
    """Correct the area and per pmt area of peaks from saturation
    :param records: Records
    :param rlinks: strax.record_links of corresponding records.
    :param peaks: Peaklets / Peaks
    :param hitlets: Hitlets found in records to build peaks.
        (Hitlets are hits including the left/right extension)
    :param to_pe: adc to PE conversion (length should equal number of PMTs)
    :param reference_length: Maximum number of reference sample used
    to correct saturated samples
    :param min_reference_length: Minimum number of reference sample used
    to correct saturated samples
    :param use_classification: Option of using classification to pick only S2
    """

    if not len(records) or not len(peaks):
        return

    # Search for peaks with saturated channels
    mask = peaks['n_saturated_channels'] > 0
    if use_classification:
        mask &= peaks['type'] == 2
    peak_list = np.where(mask)[0]
    # Look up records that touch each peak
    record_ranges = _touching_windows(records['time'], strax.endtime(records),
                                      peaks[peak_list]['time'],
                                      strax.endtime(peaks[peak_list]))

    # Create temporary arrays for calculation
    dt = records[0]['dt']
    n_channels = len(peaks[0]['saturated_channel'])
    len_buffer = np.max(peaks['length'] * peaks['dt']) // dt + 1
    max_nrecord = len_buffer // len(records[0]['data']) + 1
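    # max_nrecord bounds how many record fragments a single channel can
    # contribute to the longest peak.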

    # Buff the sum wf [pe] of non-saturated channels
    b_sumwf = np.zeros(len_buffer, dtype=np.float32)
    # Buff the records 'data' [ADC] in saturated channels
    b_pulse = np.zeros((n_channels, len_buffer), dtype=np.int16)
    # Buff the corresponding record index of saturated channels
    b_index = np.zeros((n_channels, max_nrecord), dtype=np.int64)

    # Main
    for ix, peak_i in enumerate(peak_list):
        # reset buffers
        b_sumwf[:] = 0
        b_pulse[:] = 0
        b_index[:] = -1

        p = peaks[peak_i]
        channel_saturated = p['saturated_channel'] > 0

        for record_i in range(record_ranges[ix][0], record_ranges[ix][1]):
            r = records[record_i]
            r_slice, b_slice = strax.overlap_indices(
                r['time'] // dt, r['length'], p['time'] // dt,
                p['length'] * p['dt'] // dt)
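            # r_slice selects the overlapping samples within the record,
            # b_slice the matching samples within the peak-long buffers.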

            ch = r['channel']
            if channel_saturated[ch]:
                # Collect the raw ADC pulse of the saturated channel and store
                # the record index in the first free (-1) slot of b_index.
                b_pulse[ch, slice(*b_slice)] += r['data'][slice(*r_slice)]
                b_index[ch, np.argmin(b_index[ch])] = record_i
            else:
                # Accumulate the sum waveform of the non-saturated channels,
                # converted to PE.
                b_sumwf[slice(*b_slice)] += (r['data'][slice(*r_slice)]
                                             * to_pe[ch])

        _peak_saturation_correction_inner(channel_saturated, records, p, to_pe,
                                          b_sumwf, b_pulse, b_index,
                                          reference_length,
                                          min_reference_length)

        # Undo the sum-waveform downsampling: express the peak length in
        # units of the record-level dt again.
        peaks[peak_i]['length'] = p['length'] * p['dt'] // dt
        peaks[peak_i]['dt'] = dt

    strax.sum_waveform(peaks, hitlets, records, rlinks, to_pe, peak_list)
    return peak_list
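
A hedged sketch of how this function is typically driven (records, peaks,
hitlets and to_pe are assumed to be prepared upstream, e.g. by the peaklet
processing; strax.record_links is an existing strax helper):

import strax

# rlinks: (previous_record_i, next_record_i) for every record fragment.
rlinks = strax.record_links(records)
saturated = peak_saturation_correction(
    records, rlinks, peaks, hitlets, to_pe,
    reference_length=100, min_reference_length=20)
# saturated holds the indices of the peaks whose areas were corrected.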