Code example #1
File: test_url_config.py  Project: jorana/straxen
class ExamplePlugin(strax.Plugin):
    depends_on = ()
    dtype = strax.time_fields
    provides = ('test_data', )
    test_config = straxen.URLConfig(default=42, )
    cached_config = straxen.URLConfig(default=666, cache=1)

    def compute(self):
        pass
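A minimal usage sketch (my addition, not part of the example): assuming a working strax/straxen installation, URLConfig options behave like ordinary strax options, so plain defaults such as 42 are returned as-is, while string defaults containing a protocol (e.g. 'cmt://...') would be dispatched to the matching protocol handler when the plugin is built. Method names below (Context.set_config, Context.get_single_plugin) are taken from the public strax Context API.

import strax
import straxen

# Sketch only: register the ExamplePlugin from above in a bare context
# and read back its resolved config values. The run id is a placeholder.
st = strax.Context(register=[ExamplePlugin])
st.set_config({'test_config': 123})   # override like any other strax option
plugin = st.get_single_plugin('run_0', 'test_data')
print(plugin.test_config)    # -> 123
print(plugin.cached_config)  # -> 666 (default; cache=1 keeps one resolved value)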
Code example #2
class BayesPeakClassification(strax.Plugin):
    """
    Bayes Peak classification
    Returns the ln probability of a each event belonging to the S1 and S2 class.
    Uses conditional probabilities and data parameterization learned from wfsim data.
    More info can be found here xenon:xenonnt:ahiguera:bayespeakclassification

    :param peaks: peaks
    :param waveforms: peaks waveforms in PE/ns
    :param quantiles: quantiles in ns, calculate from a cumulative sum over the waveform,
                      from zero to the total area with normalized cumulative sum to determine the time
    :returns: the ln probability of a each peak belonging to S1 and S2 class
    """

    provides = 'peak_classification_bayes'
    depends_on = ('peaks', )
    __version__ = '0.0.3'
    dtype = (strax.time_fields +
             [('ln_prob_s1', np.float32, 'S1 ln probability')] +
             [('ln_prob_s2', np.float32, 'S2 ln probability')])

    # Descriptor configs
    bayes_config_file = straxen.URLConfig(
        default='resource://cmt://'
        'bayes_model'
        '?version=ONLINE&run_id=plugin.run_id&fmt=npy',
        help=
        'Bayes model, conditional probabilities tables and Bayes discrete bins'
    )
    bayes_n_nodes = straxen.URLConfig(
        default=50,
        help='Number of attributes(features) per waveform and quantile')
    n_bayes_classes = straxen.URLConfig(
        default=2, help='Number of label classes S1(1)/S2(2)')

    def setup(self):
        self.class_prior = np.ones(self.n_bayes_classes) / self.n_bayes_classes
        self.bins = self.bayes_config_file['bins']
        self.cpt = self.bayes_config_file['cprob']

    def compute(self, peaks):
        result = np.zeros(len(peaks), dtype=self.dtype)

        waveforms, quantiles = compute_wf_and_quantiles(
            peaks, self.bayes_n_nodes)

        ln_prob_s1, ln_prob_s2 = compute_inference(
            self.bins, self.bayes_n_nodes, self.cpt, self.n_bayes_classes,
            self.class_prior, waveforms, quantiles)
        result['time'] = peaks['time']
        result['endtime'] = strax.endtime(peaks)
        result['ln_prob_s1'] = ln_prob_s1
        result['ln_prob_s2'] = ln_prob_s2
        return result
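The default above chains several URLConfig protocols. As a sketch of my reading (not an authoritative description of straxen's resolver): the query arguments after '?' are shared along the chain, the innermost protocol (cmt) looks up 'bayes_model' for the given version and run, and the outer resource protocol fetches and caches that file, loading it with fmt=npy; 'plugin.run_id' is substituted with the plugin's own run id at setup time. The string itself decomposes with only the standard library:

from urllib.parse import parse_qs

url = ('resource://cmt://bayes_model'
       '?version=ONLINE&run_id=plugin.run_id&fmt=npy')
path, _, query = url.partition('?')
print(path.split('://')[:-1])   # protocol chain: ['resource', 'cmt']
print(path.split('://')[-1])    # innermost argument: 'bayes_model'
print(parse_qs(query))          # keyword arguments shared by the protocols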
Code example #3
File: veto_events.py  Project: XENONnT/straxen
class muVETOEvents(nVETOEvents):
    """Plugin which computes the boundaries of veto events.
    """
    depends_on = 'hitlets_mv'
    provides = 'events_mv'
    data_kind = 'events_mv'

    compressor = 'zstd'
    child_plugin = True

    __version__ = '0.0.1'
    events_seen = 0

    event_left_extension_mv = straxen.URLConfig(
        default=0,
        track=True,
        type=int,
        child_option=True,
        parent_option_name='event_left_extension_nv',
        help='Extends event window this many [ns] to the left.')
    event_resolving_time_mv = straxen.URLConfig(
        default=300,
        track=True,
        type=int,
        child_option=True,
        parent_option_name='event_resolving_time_nv',
        help='Resolving time for window coincidence [ns].')
    event_min_hits_mv = straxen.URLConfig(
        default=3,
        track=True,
        type=int,
        child_option=True,
        parent_option_name='event_min_hits_nv',
        help='Minimum number of fully confined hitlets to define an event.')

    def infer_dtype(self):
        self.name_event_number = 'event_number_mv'
        self.channel_range = self.config['channel_map']['mv']
        self.n_channel = (self.channel_range[1] - self.channel_range[0]) + 1
        return veto_event_dtype(self.name_event_number, self.n_channel)

    def get_window_size(self):
        return self.config['event_left_extension_mv'] + self.config[
            'event_resolving_time_mv'] + 1

    def compute(self, hitlets_mv, start, end):
        return super().compute(hitlets_mv, start, end)
Code example #4
class EnergyEstimates(strax.Plugin):
    """
    Plugin which converts cS1 and cS2 into energies (from PE to keVee).
    """
    __version__ = '0.1.1'
    depends_on = ['corrected_areas']
    dtype = [('e_light', np.float32, 'Energy in light signal [keVee]'),
             ('e_charge', np.float32, 'Energy in charge signal [keVee]'),
             ('e_ces', np.float32, 'Energy estimate [keVee]')
             ] + strax.time_fields
    save_when = strax.SaveWhen.TARGET

    # config options don't double cache things from the resource cache!
    g1 = straxen.URLConfig(
        default='bodega://g1?bodega_version=v2',
        help="S1 gain in PE / photons produced",
    )
    g2 = straxen.URLConfig(
        default='bodega://g2?bodega_version=v2',
        help="S2 gain in PE / electrons produced",
    )
    lxe_w = straxen.URLConfig(default=13.7e-3,
                              help="LXe work function in quanta/keV")

    def compute(self, events):
        el = self.cs1_to_e(events['cs1'])
        ec = self.cs2_to_e(events['cs2'])
        return dict(e_light=el,
                    e_charge=ec,
                    e_ces=el + ec,
                    time=events['time'],
                    endtime=strax.endtime(events))

    def cs1_to_e(self, x):
        return self.lxe_w * x / self.g1

    def cs2_to_e(self, x):
        return self.lxe_w * x / self.g2
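For reference, the conversion implemented by cs1_to_e / cs2_to_e is E = W * cS / g. A standalone numeric sketch with made-up placeholder values (not real calibration constants):

import numpy as np

lxe_w = 13.7e-3                      # LXe work function [keV / quantum]
g1, g2 = 0.15, 16.0                  # hypothetical gains [PE/photon, PE/electron]
cs1, cs2 = np.array([120.0]), np.array([5000.0])
e_light = lxe_w * cs1 / g1           # energy in the light signal [keVee]
e_charge = lxe_w * cs2 / g2          # energy in the charge signal [keVee]
print(e_light + e_charge)            # combined energy estimate, e_ces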
Code example #5
class PeakPositionsCNN(PeakPositionsBaseNT):
    """Convolutional Neural Network (CNN) neural net for position reconstruction"""
    provides = "peak_positions_cnn"
    algorithm = "cnn"
    __version__ = '0.0.1'

    tf_model_cnn = straxen.URLConfig(
        default=f'tf://'
        f'resource://'
        f'cmt://{algorithm}_model'
        f'?version=ONLINE'
        f'&run_id=plugin.run_id'
        f'&fmt=abs_path',
        cache=3,
    )
Code example #6
class PeakPositionsMLP(PeakPositionsBaseNT):
    """Multilayer Perceptron (MLP) neural net for position reconstruction"""
    provides = "peak_positions_mlp"
    algorithm = "mlp"

    tf_model_mlp = straxen.URLConfig(
        default=f'tf://'
        f'resource://'
        f'cmt://{algorithm}_model'
        f'?version=ONLINE'
        f'&run_id=plugin.run_id'
        f'&fmt=abs_path',
        help='MLP model. Should be opened using the "tf" descriptor. '
        'Set to "None" to skip computation',
        cache=3,
    )
Code example #7
class PeakPositionsGCN(PeakPositionsBaseNT):
    """Graph Convolutional Network (GCN) neural net for position reconstruction"""
    provides = "peak_positions_gcn"
    algorithm = "gcn"
    __version__ = '0.0.1'

    tf_model_gcn = straxen.URLConfig(
        default=f'tf://'
        f'resource://'
        f'cmt://{algorithm}_model'
        f'?version=ONLINE'
        f'&run_id=plugin.run_id'
        f'&fmt=abs_path',
        help='GCN model. Should be opened using the "tf" descriptor. '
        'Set to "None" to skip computation',
        cache=3,
    )
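Code examples #5 to #7 follow a single pattern: a subclass of PeakPositionsBaseNT sets provides, algorithm and a tf:// URLConfig pointing at the matching CMT model. A sketch of what one more algorithm would look like under that assumption; the class name, data type and CMT key below are purely hypothetical and do not exist in straxen:

class PeakPositionsExampleNet(PeakPositionsBaseNT):   # hypothetical class
    """Hypothetical extra network, mirroring the cnn/mlp/gcn examples."""
    provides = "peak_positions_examplenet"
    algorithm = "examplenet"                           # hypothetical CMT key
    __version__ = '0.0.0'

    tf_model_examplenet = straxen.URLConfig(
        default=f'tf://resource://cmt://{algorithm}_model'
        f'?version=ONLINE&run_id=plugin.run_id&fmt=abs_path',
        help='Hypothetical model, loaded through the same protocol chain.',
        cache=3,
    )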
Code example #8
class PeakPositionsNT(strax.MergeOnlyPlugin):
    """
    Merge the reconstructed algorithms of the different algorithms
    into a single one that can be used in Event Basics.

    Select one of the plugins to provide the 'x' and 'y' to be used
    further down the chain. Since we already have the information
    needed here, there is no need to wait until events to make the
    decision.

    Since the computation is trivial as it only combined the three
    input plugins, don't save this plugins output.
    """
    provides = "peak_positions"
    depends_on = ("peak_positions_cnn", "peak_positions_mlp",
                  "peak_positions_gcn")
    save_when = strax.SaveWhen.NEVER
    __version__ = '0.0.0'

    default_reconstruction_algorithm = straxen.URLConfig(
        default=DEFAULT_POSREC_ALGO,
        help="default reconstruction algorithm that provides (x,y)")

    def infer_dtype(self):
        dtype = strax.merged_dtype(
            [self.deps[d].dtype_for(d) for d in self.depends_on])
        dtype += [
            ('x', np.float32, 'Reconstructed S2 X position (cm), uncorrected'),
            ('y', np.float32, 'Reconstructed S2 Y position (cm), uncorrected')
        ]
        return dtype

    def compute(self, peaks):
        result = {dtype: peaks[dtype] for dtype in peaks.dtype.names}
        algorithm = self.config['default_reconstruction_algorithm']
        for xy in ('x', 'y'):
            result[xy] = peaks[f'{xy}_{algorithm}']
        return result
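The compute() above only aliases the per-algorithm positions; a pure-numpy sketch of that selection (field names chosen to mimic the merged input):

import numpy as np

peaks = np.zeros(2, dtype=[('x_mlp', np.float32), ('y_mlp', np.float32),
                           ('x_cnn', np.float32), ('y_cnn', np.float32)])
algorithm = 'mlp'                     # default_reconstruction_algorithm
result = {name: peaks[name] for name in peaks.dtype.names}
for xy in ('x', 'y'):
    result[xy] = peaks[f'{xy}_{algorithm}']   # alias the chosen algorithm
print(sorted(result))                 # plain x/y added next to the per-algo fields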
Code example #9
File: acqmon_processing.py  Project: XENONnT/straxen
class VetoIntervals(strax.OverlapWindowPlugin):
    """ Find pairs of veto start and veto stop signals and the veto
    duration between them:
     - busy_*  <= V1495 busy veto for tpc channels
     - busy_he_*    <= V1495 busy veto for high energy tpc channels
     - hev_*   <= DDC10 hardware high energy veto
     - straxen_deadtime <= special case of deadtime introduced by the
       DAQReader-plugin
    """
    __version__ = '1.1.0'
    depends_on = 'aqmon_hits'
    provides = 'veto_intervals'
    data_kind = 'veto_intervals'

    # This option is just showing where the OverlapWindowPlugin fails.
    # We need to buffer the entire run in order not to run into chunking
    # issues. A better solution would be using
    #   github.com/AxFoundation/strax/pull/654
    max_veto_window = straxen.URLConfig(
        default=int(7.2e12),
        track=True,
        type=int,
        help='Maximum separation between veto stop and start pulses [ns]. '
        'Set to be >> than the max duration of the run to be able to '
        'fully store one run into buffer since aqmon-hits are not '
        'sorted by endtime')

    def infer_dtype(self):
        dtype = [(('veto interval [ns]', 'veto_interval'), np.int64),
                 (('veto signal type', 'veto_type'), np.str_('U20'))]
        dtype += strax.time_fields
        return dtype

    def setup(self):
        self.veto_names = ['busy_', 'busy_he_', 'hev_']
        self.channel_map = {
            aq_ch.name.lower(): int(aq_ch)
            for aq_ch in AqmonChannels
        }

    def get_window_size(self):
        # Give a very wide window
        return self.max_veto_window

    def compute(self, aqmon_hits, start, end):
        # Allocate a nice big buffer and throw away the part we don't need later
        result = np.zeros(len(aqmon_hits) * len(self.veto_names), self.dtype)
        vetos_seen = 0

        for veto_name in self.veto_names:
            veto_hits_start = channel_select(
                aqmon_hits, self.channel_map[veto_name + 'start'])
            veto_hits_stop = channel_select(
                aqmon_hits, self.channel_map[veto_name + 'stop'])

            veto_hits_start, veto_hits_stop = self.handle_starts_and_stops_outside_of_run(
                veto_hits_start=veto_hits_start,
                veto_hits_stop=veto_hits_stop,
                chunk_start=start,
                chunk_end=end,
                veto_name=veto_name,
            )
            n_vetos = len(veto_hits_start)

            result["time"][vetos_seen:vetos_seen +
                           n_vetos] = veto_hits_start['time']
            result["endtime"][vetos_seen:vetos_seen +
                              n_vetos] = veto_hits_stop['time']
            result["veto_type"][vetos_seen:vetos_seen +
                                n_vetos] = veto_name + 'veto'

            vetos_seen += n_vetos

        # Straxen deadtime is special, it's a start and stop with no data
        # but already an interval so easily used here
        artificial_deadtime = aqmon_hits[(
            aqmon_hits['channel'] == AqmonChannels.ARTIFICIAL_DEADTIME)]
        n_artificial = len(artificial_deadtime)

        if n_artificial:
            # Append after the vetoes already stored in the buffer
            sel = slice(vetos_seen, vetos_seen + n_artificial)
            result[sel]['time'] = artificial_deadtime['time']
            result[sel]['endtime'] = strax.endtime(artificial_deadtime)
            result[sel]['veto_type'] = 'straxen_deadtime_veto'
            vetos_seen += n_artificial

        result = result[:vetos_seen]
        result['veto_interval'] = result['endtime'] - result['time']
        sort = np.argsort(result['time'])
        result = result[sort]
        return result

    def handle_starts_and_stops_outside_of_run(
        self,
        veto_hits_start: np.ndarray,
        veto_hits_stop: np.ndarray,
        chunk_start: int,
        chunk_end: int,
        veto_name: str,
    ) -> typing.Tuple[np.ndarray, np.ndarray]:
        """
        We might be missing one start or one stop at the end of the run,
        set it to the chunk endtime if this is the case
        """
        # Declared here just so they show up in traceback info
        extra_start = []
        extra_stop = []
        missing_a_final_stop = (
            len(veto_hits_start) and len(veto_hits_stop)
            and veto_hits_start[-1]['time'] > veto_hits_stop['time'][-1])
        missing_a_final_stop = (missing_a_final_stop
                                or (len(veto_hits_start)
                                    and not len(veto_hits_stop)))
        if missing_a_final_stop:
            # There is one *start* of the //end// of the run -> the
            # **stop** is missing (because it's outside of the run),
            # let's add one **stop** at the //end// of this chunk
            extra_stop = self.fake_hit(chunk_end)
            veto_hits_stop = np.concatenate([veto_hits_stop, extra_stop])
        if len(veto_hits_stop) - len(veto_hits_start) == 1:
            # There is one *stop* of the //beginning// of the run
            # -> the **start** is missing (because it's from before
            # starting the run), # let's add one **start** at the
            # //beginning// of this chunk
            extra_start = self.fake_hit(chunk_start)
            veto_hits_start = np.concatenate([extra_start, veto_hits_start])

        something_is_wrong = len(veto_hits_start) != len(veto_hits_stop)

        message = (f'Got inconsistent number of {veto_name} starts '
                   f'({len(veto_hits_start)}) / stops ({len(veto_hits_stop)}).')
        if len(extra_start):
            message += ' Despite the fact that we inserted one extra start at the beginning of the run.'  # noqa
        elif len(extra_stop):
            message += ' Despite the fact that we inserted one extra stop at the end of the run.'  # noqa
        if something_is_wrong:
            raise ValueError(message)

        if np.any(veto_hits_start['time'] > veto_hits_stop['time']):
            raise ValueError(
                'Found vetoes starting before the previous one stopped')

        return veto_hits_start, veto_hits_stop

    @staticmethod
    def fake_hit(start, dt=1, length=1):
        hit = np.zeros(1, strax.hit_dtype)
        hit['time'] = start
        hit['dt'] = dt
        hit['length'] = length
        return hit
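A self-contained illustration of the bookkeeping in handle_starts_and_stops_outside_of_run (pure numpy, made-up times): if the run ends inside a veto, the final stop is missing and is replaced by the chunk end; if it starts inside a veto, the first start is replaced by the chunk start.

import numpy as np

starts = np.array([10, 50, 90])       # veto start times [ns], example values
stops = np.array([20, 60])            # the last stop fell outside the run
chunk_start, chunk_end = 0, 100
if len(starts) > len(stops):
    stops = np.append(stops, chunk_end)         # close the dangling veto
elif len(stops) > len(starts):
    starts = np.insert(starts, 0, chunk_start)  # open the dangling veto
print(np.stack([starts, stops], axis=1))        # paired veto intervals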
Code example #10
class CorrectedAreas(strax.Plugin):
    """
    Plugin which applies light collection efficiency maps and electron
    life time to the data.

    Computes the cS1/cS2 for the main/alternative S1/S2 as well as the
    corrected life time.

    Note:
        Please be aware that for both, the main and alternative S1, the
        area is corrected according to the xy-position of the main S2.

        There are now 3 components of cS2s: cs2_top, cs2_bottom and cs2.
        cs2_top and cs2_bottom are corrected by the corresponding maps,
        and cs2 is the sum of the two.
    """
    __version__ = '0.3.0'

    depends_on = ['event_basics', 'event_positions']

    # Descriptor configs
    elife = straxen.URLConfig(
        default='cmt://elife?version=ONLINE&run_id=plugin.run_id',
        help='electron lifetime in [ns]')

    # default posrec, used to determine which LCE map to use
    default_reconstruction_algorithm = straxen.URLConfig(
        default=DEFAULT_POSREC_ALGO,
        help="default reconstruction algorithm that provides (x,y)")
    s1_xyz_map = straxen.URLConfig(
        default='itp_map://resource://cmt://format://'
        's1_xyz_map_{algo}?version=ONLINE&run_id=plugin.run_id'
        '&fmt=json&algo=plugin.default_reconstruction_algorithm',
        cache=True)
    s2_xy_map = straxen.URLConfig(
        default='itp_map://resource://cmt://format://'
        's2_xy_map_{algo}?version=ONLINE&run_id=plugin.run_id'
        '&fmt=json&algo=plugin.default_reconstruction_algorithm',
        cache=True)

    # average SE gain for a given time period. default to the value of this run in ONLINE model
    # thus, by default, there will be no time-dependent correction according to se gain
    avg_se_gain = straxen.URLConfig(
        default='cmt://avg_se_gain?version=ONLINE&run_id=plugin.run_id',
        help='Nominal single electron (SE) gain in PE / electron extracted. '
        'Data will be corrected to this value')

    # se gain for this run, allowing for using CMT. default to online
    se_gain = straxen.URLConfig(
        default='cmt://se_gain?version=ONLINE&run_id=plugin.run_id',
        help='Actual SE gain for a given run (allows for time dependence)')

    # relative extraction efficiency which can change with time and modeled by CMT.
    rel_extraction_eff = straxen.URLConfig(
        default='cmt://rel_extraction_eff?version=ONLINE&run_id=plugin.run_id',
        help=
        'Relative extraction efficiency for this run (allows for time dependence)'
    )

    # relative light yield
    # defaults to no correction
    rel_light_yield = straxen.URLConfig(
        default=
        'cmt://relative_light_yield?version=ONLINE&run_id=plugin.run_id',
        help='Relative light yield (allows for time dependence)')

    region_linear = straxen.URLConfig(
        default=28,
        help=
        'linear cut (cm) for ab region, check out the note https://xe1t-wiki.lngs.infn.it/doku.php?id=jlong:sr0_2_region_se_correction'
    )

    region_circular = straxen.URLConfig(
        default=60,
        help=
        'circular cut (cm) for ab region, check out the note https://xe1t-wiki.lngs.infn.it/doku.php?id=jlong:sr0_2_region_se_correction'
    )

    def infer_dtype(self):
        dtype = []
        dtype += strax.time_fields

        for peak_type, peak_name in zip(['', 'alt_'], ['main', 'alternate']):
            dtype += [
                (f'{peak_type}cs1', np.float32,
                 f'Corrected area of {peak_name} S1 [PE]'),
                (f'{peak_type}cs1_wo_timecorr', np.float32,
                 f'Corrected area of {peak_name} S1 [PE] before time-dep LY correction'
                 ),
                (f'{peak_type}cs2_wo_elifecorr', np.float32,
                 f'Corrected area of {peak_name} S2 before elife correction '
                 f'(s2 xy correction + SEG/EE correction applied) [PE]'),
                (f'{peak_type}cs2_wo_timecorr', np.float32,
                 f'Corrected area of {peak_name} S2 before SEG/EE and elife corrections '
                 f'(s2 xy correction applied) [PE]'),
                (f'{peak_type}cs2_area_fraction_top', np.float32,
                 f'Fraction of area seen by the top PMT array for corrected {peak_name} S2'
                 ),
                (f'{peak_type}cs2_bottom', np.float32,
                 f'Corrected area of {peak_name} S2 in the bottom PMT array [PE]'
                 ),
                (f'{peak_type}cs2', np.float32,
                 f'Corrected area of {peak_name} S2 [PE]'),
            ]
        return dtype

    def ab_region(self, x, y):
        new_x, new_y = rotate_perp_wires(x, y)
        cond = new_x < self.region_linear
        cond &= new_x > -self.region_linear
        cond &= new_x**2 + new_y**2 < self.region_circular**2
        return cond

    def cd_region(self, x, y):
        return ~self.ab_region(x, y)

    def compute(self, events):
        result = np.zeros(len(events), self.dtype)
        result['time'] = events['time']
        result['endtime'] = events['endtime']

        # S1 corrections depend on the actual corrected event position.
        # We use this also for the alternate S1; for e.g. Kr this is
        # fine as the S1 correction varies slowly.
        event_positions = np.vstack([events['x'], events['y'], events['z']]).T

        for peak_type in ["", "alt_"]:
            result[f"{peak_type}cs1_wo_timecorr"] = events[
                f'{peak_type}s1_area'] / self.s1_xyz_map(event_positions)
            result[f"{peak_type}cs1"] = result[
                f"{peak_type}cs1_wo_timecorr"] / self.rel_light_yield

        # s2 corrections
        # S2 top and bottom are corrected separately, and cS2 total is the sum of the two
        # figure out the map name
        if len(self.s2_xy_map.map_names) > 1:
            s2_top_map_name = "map_top"
            s2_bottom_map_name = "map_bottom"
        else:
            s2_top_map_name = "map"
            s2_bottom_map_name = "map"

        regions = {'ab': self.ab_region, 'cd': self.cd_region}

        # setup SEG and EE corrections
        # if they are dicts, we just leave them as is
        # if they are not, we assume they are floats and
        # create a dict with the same correction in each region
        if isinstance(self.se_gain, dict):
            seg = self.se_gain
        else:
            seg = {key: self.se_gain for key in regions}

        if isinstance(self.avg_se_gain, dict):
            avg_seg = self.avg_se_gain
        else:
            avg_seg = {key: self.avg_se_gain for key in regions}

        if isinstance(self.rel_extraction_eff, dict):
            ee = self.rel_extraction_eff
        else:
            ee = {key: self.rel_extraction_eff for key in regions}

        # now can start doing corrections
        for peak_type in ["", "alt_"]:
            # S2(x,y) corrections use the observed S2 positions
            s2_positions = np.vstack(
                [events[f'{peak_type}s2_x'], events[f'{peak_type}s2_y']]).T

            # corrected s2 with s2 xy map only, i.e. no elife correction
            # this is for s2-only events which don't have drift time info

            cs2_top_xycorr = (
                events[f'{peak_type}s2_area'] *
                events[f'{peak_type}s2_area_fraction_top'] /
                self.s2_xy_map(s2_positions, map_name=s2_top_map_name))
            cs2_bottom_xycorr = (
                events[f'{peak_type}s2_area'] *
                (1 - events[f'{peak_type}s2_area_fraction_top']) /
                self.s2_xy_map(s2_positions, map_name=s2_bottom_map_name))

            # For electron lifetime corrections to the S2s,
            # use drift time computed using the main S1.
            el_string = peak_type + "s2_interaction_" if peak_type == "alt_" else peak_type
            elife_correction = np.exp(events[f'{el_string}drift_time'] /
                                      self.elife)
            result[f"{peak_type}cs2_wo_timecorr"] = (
                cs2_top_xycorr + cs2_bottom_xycorr) * elife_correction

            for partition, func in regions.items():
                # partitioned SE and EE
                partition_mask = func(events[f'{peak_type}s2_x'],
                                      events[f'{peak_type}s2_y'])

                # Correct for SEgain and extraction efficiency
                seg_ee_corr = seg[partition] / avg_seg[partition] * ee[
                    partition]

                # note that these are already masked!
                cs2_top_wo_elifecorr = cs2_top_xycorr[
                    partition_mask] / seg_ee_corr
                cs2_bottom_wo_elifecorr = cs2_bottom_xycorr[
                    partition_mask] / seg_ee_corr

                result[f"{peak_type}cs2_wo_elifecorr"][
                    partition_mask] = cs2_top_wo_elifecorr + cs2_bottom_wo_elifecorr

                # cs2aft doesn't need elife/time corrections as they cancel
                result[f"{peak_type}cs2_area_fraction_top"][
                    partition_mask] = cs2_top_wo_elifecorr / (
                        cs2_top_wo_elifecorr + cs2_bottom_wo_elifecorr)

                result[f"{peak_type}cs2"][
                    partition_mask] = result[f"{peak_type}cs2_wo_elifecorr"][
                        partition_mask] * elife_correction[partition_mask]
                result[f"{peak_type}cs2_bottom"][
                    partition_mask] = cs2_bottom_wo_elifecorr * elife_correction[
                        partition_mask]

        return result
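A numeric sketch of the corrections applied above, with made-up numbers rather than real correction values: cS1 divides out the S1(x, y, z) map and the relative light yield; cS2 divides out the S2(x, y) map and the SEG/EE factor and multiplies by exp(drift_time / elife).

import numpy as np

s1_area, s2_area = 100.0, 4000.0      # uncorrected areas [PE], example values
s1_xyz, s2_xy = 0.95, 1.05            # hypothetical map values at this position
rel_ly, seg_ee = 0.98, 1.02           # hypothetical time-dependent factors
drift_time, elife = 5e5, 1e7          # [ns]
cs1 = s1_area / s1_xyz / rel_ly
cs2 = s2_area / s2_xy / seg_ee * np.exp(drift_time / elife)
print(cs1, cs2)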
Code example #11
class Events(strax.OverlapWindowPlugin):
    """
    Plugin which defines an "event" in our TPC.

    An event is defined by peak(s) in fixed range of time around a peak
    which satisfies certain conditions:
        1. The triggering peak must have a certain area.
        2. The triggering peak must have less than
           "trigger_max_competing" peaks. (A competing peak must have a
           certain area fraction of the triggering peak and must be in a
           window close to the main peak)

    Note:
        The time range which defines an event gets chopped at the chunk
        boundaries. This happens at invalid boundaries of the
    """
    depends_on = ['peak_basics', 'peak_proximity']
    provides = 'events'
    data_kind = 'events'
    __version__ = '0.1.1'
    save_when = strax.SaveWhen.NEVER

    electron_drift_velocity = straxen.URLConfig(
        default='cmt://'
        'electron_drift_velocity'
        '?version=ONLINE&run_id=plugin.run_id',
        cache=True,
        help='Vertical electron drift velocity in cm/ns (1e4 m/ms)')

    dtype = [('event_number', np.int64, 'Event number in this dataset'),
             ('time', np.int64, 'Event start time in ns since the unix epoch'),
             ('endtime', np.int64, 'Event end time in ns since the unix epoch')
             ]

    events_seen = 0

    def setup(self):
        if self.config['s1_min_coincidence'] > self.config[
                'event_s1_min_coincidence']:
            raise ValueError(
                'Peak s1 coincidence requirement should be smaller '
                'or equal to event_s1_min_coincidence')
        self.drift_time_max = int(self.config['max_drift_length'] /
                                  self.electron_drift_velocity)
        # Left_extension and right_extension should be computed in setup to be
        # reflected in cutax too.
        self.left_extension = self.config[
            'left_event_extension'] + self.drift_time_max
        self.right_extension = self.config['right_event_extension']

    def get_window_size(self):
        # Take a large window for safety, events can have long tails
        return 10 * (self.config['left_event_extension'] + self.drift_time_max
                     + self.config['right_event_extension'])

    def compute(self, peaks, start, end):
        _is_triggering = peaks['area'] > self.config['trigger_min_area']
        _is_triggering &= (peaks['n_competing'] <=
                           self.config['trigger_max_competing'])
        if self.config['exclude_s1_as_triggering_peaks']:
            _is_triggering &= peaks['type'] == 2
        else:
            is_not_s1 = peaks['type'] != 1
            has_tc_large_enough = (peaks['tight_coincidence'] >=
                                   self.config['event_s1_min_coincidence'])
            _is_triggering &= (is_not_s1 | has_tc_large_enough)

        triggers = peaks[_is_triggering]

        # Join nearby triggers
        t0, t1 = strax.find_peak_groups(triggers,
                                        gap_threshold=self.left_extension +
                                        self.right_extension + 1,
                                        left_extension=self.left_extension,
                                        right_extension=self.right_extension)

        # Don't extend beyond the chunk boundaries
        # This will often happen for events near the invalid boundary of the
        # overlap processing (which should be thrown away)
        t0 = np.clip(t0, start, end)
        t1 = np.clip(t1, start, end)

        result = np.zeros(len(t0), self.dtype)
        result['time'] = t0
        result['endtime'] = t1
        result['event_number'] = np.arange(len(result)) + self.events_seen

        if not result.size > 0:
            print("Found chunk without events?!")

        self.events_seen += len(result)

        return result
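The grouping done by strax.find_peak_groups can be pictured with a small standalone sketch (pure numpy, made-up times, not the strax implementation): triggers closer together than left_extension + right_extension + 1 are merged, and each group is then extended on both sides.

import numpy as np

trigger_times = np.array([1000, 1200, 50000])   # example trigger times [ns]
left_ext, right_ext = 250, 250
gaps = np.diff(trigger_times)
new_group = np.concatenate([[True], gaps > left_ext + right_ext + 1])
group_ids = np.cumsum(new_group) - 1
for g in np.unique(group_ids):
    times = trigger_times[group_ids == g]
    print(times[0] - left_ext, times[-1] + right_ext)   # event [start, end)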
Code example #12
class EventPositions(strax.Plugin):
    """
    Computes the observed and corrected position for the main S1/S2
    pairs in an event. For XENONnT data, it returns the FDC corrected
    positions of the default_reconstruction_algorithm. In case the fdc_map
    is given as a file (not through CMT), then the coordinate system
    should be given as (x, y, z), not (x, y, drift_time).
    """

    depends_on = ('event_basics', )

    __version__ = '0.1.5'

    default_reconstruction_algorithm = straxen.URLConfig(
        default=DEFAULT_POSREC_ALGO,
        help="default reconstruction algorithm that provides (x,y)")

    electron_drift_velocity = straxen.URLConfig(
        default='cmt://'
        'electron_drift_velocity'
        '?version=ONLINE&run_id=plugin.run_id',
        cache=True,
        help='Vertical electron drift velocity in cm/ns (1e4 m/ms)')

    electron_drift_time_gate = straxen.URLConfig(
        default='cmt://'
        'electron_drift_time_gate'
        '?version=ONLINE&run_id=plugin.run_id',
        help='Electron drift time from the gate in ns',
        cache=True)

    def infer_dtype(self):
        dtype = []
        for j in 'x y r'.split():
            comment = f'Main interaction {j}-position, field-distortion corrected (cm)'
            dtype += [(j, np.float32, comment)]
            for s_i in [1, 2]:
                comment = f'Alternative S{s_i} interaction (rel. main S{int(2*(1.5-s_i)+s_i)}) {j}-position, field-distortion corrected (cm)'
                field = f'alt_s{s_i}_{j}_fdc'
                dtype += [(field, np.float32, comment)]

        for j in ['z']:
            comment = 'Interaction z-position, using mean drift velocity only (cm)'
            dtype += [(j, np.float32, comment)]
            for s_i in [1, 2]:
                comment = f'Alternative S{s_i} z-position (rel. main S{int(2*(1.5-s_i)+s_i)}), using mean drift velocity only (cm)'
                field = f'alt_s{s_i}_z'
                dtype += [(field, np.float32, comment)]

        naive_pos = []
        fdc_pos = []
        for j in 'r z'.split():
            naive_pos += [
                (f'{j}_naive', np.float32,
                 f'Main interaction {j}-position with observed position (cm)')
            ]
            fdc_pos += [
                (f'{j}_field_distortion_correction', np.float32,
                 f'Correction added to {j}_naive for field distortion (cm)')
            ]
            for s_i in [1, 2]:
                naive_pos += [(
                    f'alt_s{s_i}_{j}_naive', np.float32,
                    f'Alternative S{s_i} interaction (rel. main S{int(2*(1.5-s_i)+s_i)}) {j}-position with observed position (cm)'
                )]
                fdc_pos += [(
                    f'alt_s{s_i}_{j}_field_distortion_correction', np.float32,
                    f'Correction added to alt_s{s_i}_{j}_naive for field distortion (cm)'
                )]
        dtype += naive_pos + fdc_pos
        for s_i in [1, 2]:
            dtype += [(
                f'alt_s{s_i}_theta', np.float32,
                f'Alternative S{s_i} (rel. main S{int(2*(1.5-s_i)+s_i)}) interaction angular position (radians)'
            )]

        dtype += [('theta', np.float32,
                   f'Main interaction angular position (radians)')]
        return dtype + strax.time_fields

    def setup(self):
        if isinstance(self.config['fdc_map'], str):
            self.map = InterpolatingMap(
                get_resource(self.config['fdc_map'], fmt='binary'))

        elif is_cmt_option(self.config['fdc_map']):
            self.map = InterpolatingMap(
                get_cmt_resource(
                    self.run_id,
                    tuple([
                        'suffix',
                        self.config['default_reconstruction_algorithm'],
                        *self.config['fdc_map']
                    ]),
                    fmt='binary'))
            self.map.scale_coordinates([1., 1., -self.electron_drift_velocity])

        else:
            raise NotImplementedError('FDC map format not understood.')

    def compute(self, events):

        result = {'time': events['time'], 'endtime': strax.endtime(events)}

        # s_i == 0 indicates the main event, while s_i != 0 means alternative S1 or S2 is used based on s_i value
        # while the other peak is the main one (e.g., s_i == 1 means that the event is defined using altS1 and main S2)
        for s_i in [0, 1, 2]:

            # alt_sx_interaction_drift_time is calculated between main Sy and alternative Sx
            drift_time = events['drift_time'] if not s_i else events[
                f'alt_s{s_i}_interaction_drift_time']

            z_obs = -self.electron_drift_velocity * drift_time
            xy_pos = 's2_' if s_i != 2 else 'alt_s2_'
            orig_pos = np.vstack(
                [events[f'{xy_pos}x'], events[f'{xy_pos}y'], z_obs]).T
            r_obs = np.linalg.norm(orig_pos[:, :2], axis=1)
            delta_r = self.map(orig_pos)
            z_obs = z_obs + self.electron_drift_velocity * self.electron_drift_time_gate

            # apply radial correction
            with np.errstate(invalid='ignore', divide='ignore'):
                r_cor = r_obs + delta_r
                scale = np.divide(r_cor,
                                  r_obs,
                                  out=np.zeros_like(r_cor),
                                  where=r_obs != 0)

            # z correction due to longer drift time for distortion
            # calculated based on the Pythagorean theorem where
            # the electron track is assumed to be a straight line
            # (geometrical reasoning not valid if |delta_r| > |z_obs|,
            #  as cathetus cannot be longer than hypothenuse)
            with np.errstate(invalid='ignore'):
                z_cor = -(z_obs**2 - delta_r**2)**0.5
                invalid = np.abs(z_obs) < np.abs(delta_r)
                # do not apply z correction above gate
                invalid |= z_obs >= 0
            z_cor[invalid] = z_obs[invalid]
            delta_z = z_cor - z_obs

            pre_field = '' if s_i == 0 else f'alt_s{s_i}_'
            post_field = '' if s_i == 0 else '_fdc'
            result.update({
                f'{pre_field}x{post_field}':
                orig_pos[:, 0] * scale,
                f'{pre_field}y{post_field}':
                orig_pos[:, 1] * scale,
                f'{pre_field}r{post_field}':
                r_cor,
                f'{pre_field}r_naive':
                r_obs,
                f'{pre_field}r_field_distortion_correction':
                delta_r,
                f'{pre_field}theta':
                np.arctan2(orig_pos[:, 1], orig_pos[:, 0]),
                f'{pre_field}z_naive':
                z_obs,
                # using z_obs in agreement with the dtype description
                # the FDC for z (z_cor) is found to be not reliable (see #527)
                f'{pre_field}z':
                z_obs,
                f'{pre_field}z_field_distortion_correction':
                delta_z
            })

        return result
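A numeric sketch of the field-distortion correction above, with made-up values: the radial correction delta_r rescales (x, y), and z is corrected along the hypotenuse only where |z_obs| >= |delta_r| and the position is below the gate.

import numpy as np

x, y, z_obs = 30.0, 0.0, -50.0        # observed position [cm], example values
delta_r = 1.5                         # hypothetical FDC map output [cm]
r_obs = np.hypot(x, y)
scale = (r_obs + delta_r) / r_obs     # radial scale factor
x_cor, y_cor = x * scale, y * scale
z_cor = -np.sqrt(z_obs**2 - delta_r**2) if abs(z_obs) >= abs(delta_r) else z_obs
print(x_cor, y_cor, z_cor)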
Code example #13
class EventBasics(strax.Plugin):
    """
    Computes the basic properties of the main/alternative S1/S2 within
    an event.

    The main S1 and alternative S1 are given by the largest two S1-Peaks
    within the event.
    The main S2 is given by the largest S2-Peak within the event, while
    alternative S2 is selected as the largest S2 other than main S2
    in the time window [main S1 time, main S1 time + max drift time].
    """
    __version__ = '1.3.1'

    depends_on = ('events', 'peak_basics', 'peak_positions', 'peak_proximity')
    provides = 'event_basics'
    data_kind = 'events'
    loop_over = 'events'

    electron_drift_velocity = straxen.URLConfig(
        default='cmt://'
        'electron_drift_velocity'
        '?version=ONLINE&run_id=plugin.run_id',
        cache=True,
        help='Vertical electron drift velocity in cm/ns (1e4 m/ms)')

    def infer_dtype(self):
        # Basic event properties
        self._set_posrec_save()
        self._set_dtype_requirements()
        dtype = []
        dtype += strax.time_fields
        dtype += [
            ('n_peaks', np.int32, 'Number of peaks in the event'),
            ('drift_time', np.float32,
             'Drift time between main S1 and S2 in ns'),
            ('event_number', np.int64, 'Event number in this dataset'),
        ]

        dtype += self._get_si_dtypes(self.peak_properties)

        dtype += [(f's2_x', np.float32,
                   f'Main S2 reconstructed X position, uncorrected [cm]'),
                  (f's2_y', np.float32,
                   f'Main S2 reconstructed Y position, uncorrected [cm]'),
                  (f'alt_s2_x', np.float32,
                   f'Alternate S2 reconstructed X position, uncorrected [cm]'),
                  (f'alt_s2_y', np.float32,
                   f'Alternate S2 reconstructed Y position, uncorrected [cm]'),
                  (f'area_before_main_s2', np.float32,
                   f'Sum of areas before Main S2 [PE]'),
                  (f'large_s2_before_main_s2', np.float32,
                   f'The largest S2 before the Main S2 [PE]')]

        dtype += self._get_posrec_dtypes()
        return dtype

    def _set_dtype_requirements(self):
        """Needs to be run before inferring dtype as it is needed there"""
        # Properties to store for each peak (main and alternate S1 and S2)
        self.peak_properties = (
            ('time', np.int64, 'start time since unix epoch [ns]'),
            ('center_time', np.int64,
             'weighted center time since unix epoch [ns]'),
            ('endtime', np.int64, 'end time since unix epoch [ns]'),
            ('area', np.float32, 'area, uncorrected [PE]'),
            ('n_channels', np.int16, 'count of contributing PMTs'),
            ('n_hits', np.int16,
             'count of hits contributing at least one sample to the peak'),
            ('n_competing', np.int32, 'number of competing peaks'),
            ('max_pmt', np.int16, 'PMT number which contributes the most PE'),
            ('max_pmt_area', np.float32,
             'area in the largest-contributing PMT (PE)'),
            ('range_50p_area', np.float32, 'width, 50% area [ns]'),
            ('range_90p_area', np.float32, 'width, 90% area [ns]'),
            ('rise_time', np.float32,
             'time between 10% and 50% area quantiles [ns]'),
            ('area_fraction_top', np.float32,
             'fraction of area seen by the top PMT array'),
            ('tight_coincidence', np.int16,
             'Channel within tight range of mean'),
            ('n_saturated_channels', np.int16,
             'Total number of saturated channels'),
        )

    def setup(self):
        self.drift_time_max = int(self.config['max_drift_length'] /
                                  self.electron_drift_velocity)

    @staticmethod
    def _get_si_dtypes(peak_properties):
        """Get properties for S1/S2 from peaks directly"""
        si_dtype = []
        for s_i in [1, 2]:
            # Peak indices
            si_dtype += [(f's{s_i}_index', np.int32,
                          f'Main S{s_i} peak index in event'),
                         (f'alt_s{s_i}_index', np.int32,
                          f'Alternate S{s_i} peak index in event')]

            # Peak properties
            for name, dt, comment in peak_properties:
                si_dtype += [(f's{s_i}_{name}', dt, f'Main S{s_i} {comment}'),
                             (f'alt_s{s_i}_{name}', dt,
                              f'Alternate S{s_i} {comment}')]

            # Drifts and delays
            si_dtype += [(f'alt_s{s_i}_interaction_drift_time', np.float32,
                          f'Drift time using alternate S{s_i} [ns]'),
                         (f'alt_s{s_i}_delay', np.int32,
                          f'Time between main and alternate S{s_i} [ns]')]
        return si_dtype

    def _set_posrec_save(self):
        """
        parse x_mlp et cetera if needed to get the algorithms used and
        set required class attributes
        """
        posrec_fields = self.deps['peak_positions'].dtype_for(
            'peak_positions').names
        posrec_names = [d.split('_')[-1] for d in posrec_fields if 'x_' in d]

        # Preserve order. "set" is not ordered and dtypes should always be ordered
        self.pos_rec_labels = list(set(posrec_names))
        self.pos_rec_labels.sort()

        self.posrec_save = [(xy + algo) for xy in ['x_', 'y_']
                            for algo in self.pos_rec_labels]

    def _get_posrec_dtypes(self):
        """Get S2 positions for each of the position reconstruction algorithms"""
        posrec_dtype = []

        for algo in self.pos_rec_labels:
            # S2 positions
            posrec_dtype += [
                (f's2_x_{algo}', np.float32,
                 f'Main S2 {algo}-reconstructed X position, uncorrected [cm]'),
                (f's2_y_{algo}', np.float32,
                 f'Main S2 {algo}-reconstructed Y position, uncorrected [cm]'),
                (f'alt_s2_x_{algo}', np.float32,
                 f'Alternate S2 {algo}-reconstructed X position, uncorrected [cm]'
                 ),
                (f'alt_s2_y_{algo}', np.float32,
                 f'Alternate S2 {algo}-reconstructed Y position, uncorrected [cm]'
                 )
            ]

        return posrec_dtype

    @staticmethod
    def set_nan_defaults(buffer):
        """
        When constructing the dtype, take extra care to set values to
        np.Nan / -1 (for ints) as 0 might have a meaning
        """
        for field in buffer.dtype.names:
            if np.issubdtype(buffer.dtype[field], np.integer):
                buffer[field][:] = -1
            else:
                buffer[field][:] = np.nan

    def compute(self, events, peaks):
        result = np.zeros(len(events), dtype=self.dtype)
        self.set_nan_defaults(result)

        split_peaks = strax.split_by_containment(peaks, events)

        result['time'] = events['time']
        result['endtime'] = events['endtime']
        result['event_number'] = events['event_number']

        self.fill_events(result, events, split_peaks)
        return result

    # If copy_largest_peaks_into_event is ever numbafied, also numbafy this function
    def fill_events(self, result_buffer, events, split_peaks):
        """Loop over the events and peaks within that event"""
        for event_i, _ in enumerate(events):
            peaks_in_event_i = split_peaks[event_i]
            n_peaks = len(peaks_in_event_i)
            result_buffer[event_i]['n_peaks'] = n_peaks

            if not n_peaks:
                raise ValueError(f'No peaks within event?\n{events[event_i]}')

            self.fill_result_i(result_buffer[event_i], peaks_in_event_i)

    def fill_result_i(self, event, peaks):
        """For a single event with the result_buffer"""
        # Consider S2s first, then S1s (to enable allow_posts2_s1s = False)
        # number_of_peaks=0 selects all available s2 and sort by area
        largest_s2s, s2_idx = self.get_largest_sx_peaks(peaks,
                                                        s_i=2,
                                                        number_of_peaks=0)

        if not self.config['allow_posts2_s1s'] and len(largest_s2s):
            s1_latest_time = largest_s2s[0]['time']
        else:
            s1_latest_time = np.inf

        largest_s1s, s1_idx = self.get_largest_sx_peaks(
            peaks,
            s_i=1,
            s1_before_time=s1_latest_time,
            s1_min_coincidence=self.config['event_s1_min_coincidence'])

        if self.config['force_alt_s2_in_max_drift_time']:
            s2_idx, largest_s2s = self.find_main_alt_s2(
                largest_s1s,
                s2_idx,
                largest_s2s,
                self.drift_time_max,
            )
        else:
            # Select only the largest two S2s
            largest_s2s, s2_idx = largest_s2s[0:2], s2_idx[0:2]

        if self.config['force_main_before_alt']:
            s2_order = np.argsort(largest_s2s['time'])
            largest_s2s = largest_s2s[s2_order]
            s2_idx = s2_idx[s2_order]

        self.set_sx_index(event, s1_idx, s2_idx)
        self.set_event_properties(event, largest_s1s, largest_s2s, peaks)

        # Loop over S1s and S2s and over main / alt.
        for s_i, largest_s_i in enumerate([largest_s1s, largest_s2s], 1):
            # Largest index 0 -> main sx, 1 -> alt sx
            for largest_index, main_or_alt in enumerate(['s', 'alt_s']):
                peak_properties_to_save = [
                    name for name, _, _ in self.peak_properties
                ]
                if s_i == 2:
                    peak_properties_to_save += ['x', 'y']
                    peak_properties_to_save += self.posrec_save
                field_names = [
                    f'{main_or_alt}{s_i}_{name}'
                    for name in peak_properties_to_save
                ]
                self.copy_largest_peaks_into_event(event, largest_s_i,
                                                   largest_index, field_names,
                                                   peak_properties_to_save)

    @staticmethod
    @numba.njit
    def find_main_alt_s2(largest_s1s, s2_idx, largest_s2s, drift_time_max):
        """Require alt_s2 happens between main S1 and maximum drift time"""
        if len(largest_s1s) > 0 and len(largest_s2s) > 1:
            # If there is a valid s1-s2 pair and has a second s2, then check alt s2 validity
            s2_after_s1 = largest_s2s['center_time'] > largest_s1s[0][
                'center_time']
            s2_before_max_drift_time = (
                largest_s2s['center_time'] -
                largest_s1s[0]['center_time']) < 1.01 * drift_time_max
            mask = s2_after_s1 & s2_before_max_drift_time
            # Make sure the main S2 itself is never removed by the selection
            mask[0] = True
            # Take main and the largest valid alt_S2
            s2_idx, largest_s2s = s2_idx[mask][:2], largest_s2s[mask][:2]
        return s2_idx, largest_s2s

    @staticmethod
    @numba.njit
    def set_event_properties(result, largest_s1s, largest_s2s, peaks):
        """Get properties like drift time and area before main S2"""
        # Compute drift times only if we have a valid S1-S2 pair
        if len(largest_s1s) > 0 and len(largest_s2s) > 0:
            result['drift_time'] = largest_s2s[0]['center_time'] - largest_s1s[
                0]['center_time']
            if len(largest_s1s) > 1:
                result['alt_s1_interaction_drift_time'] = largest_s2s[0][
                    'center_time'] - largest_s1s[1]['center_time']
                result['alt_s1_delay'] = largest_s1s[1][
                    'center_time'] - largest_s1s[0]['center_time']
            if len(largest_s2s) > 1:
                result['alt_s2_interaction_drift_time'] = largest_s2s[1][
                    'center_time'] - largest_s1s[0]['center_time']
                result['alt_s2_delay'] = largest_s2s[1][
                    'center_time'] - largest_s2s[0]['center_time']

        # areas before main S2
        if len(largest_s2s):
            peaks_before_ms2 = peaks[peaks['time'] < largest_s2s[0]['time']]
            result['area_before_main_s2'] = np.sum(peaks_before_ms2['area'])

            s2peaks_before_ms2 = peaks_before_ms2[peaks_before_ms2['type'] ==
                                                  2]
            if len(s2peaks_before_ms2) == 0:
                result['large_s2_before_main_s2'] = 0
            else:
                result['large_s2_before_main_s2'] = np.max(
                    s2peaks_before_ms2['area'])
        return result

    @staticmethod
    # @numba.njit <- works but slows if fill_events is not numbafied
    def get_largest_sx_peaks(peaks,
                             s_i,
                             s1_before_time=np.inf,
                             s1_min_coincidence=0,
                             number_of_peaks=2):
        """Get the largest S1/S2. For S1s allow a min coincidence and max time"""
        # Find all peaks of this type (S1 or S2)
        s_mask = peaks['type'] == s_i
        if s_i == 1:
            s_mask &= peaks['time'] < s1_before_time
            s_mask &= peaks['tight_coincidence'] >= s1_min_coincidence

        selected_peaks = peaks[s_mask]
        s_index = np.arange(len(peaks))[s_mask]
        largest_peaks = np.argsort(
            selected_peaks['area'])[-number_of_peaks:][::-1]
        return selected_peaks[largest_peaks], s_index[largest_peaks]

    # If only we could numbafy this... Unfortunately we cannot.
    # Perhaps we could one day consider doing something like strax.copy_to_buffer
    @staticmethod
    def copy_largest_peaks_into_event(
        result,
        largest_s_i,
        main_or_alt_index,
        result_fields,
        peak_fields,
    ):
        """
        For one event, write all the peak_fields (e.g. "area") of the peak
        (largest_s_i) into their associated field in the event (e.g. s1_area),
        main_or_alt_index differentiates between main (index 0) and alt (index 1)
        """
        index_not_in_list_of_largest_peaks = main_or_alt_index >= len(
            largest_s_i)
        if index_not_in_list_of_largest_peaks:
            # There is no such peak. E.g. main_or_alt_index == 1 but largest_s_i = ["Main S1"]
            # Asking for index 1 doesn't work on a len 1 list of peaks.
            return

        for i, ev_field in enumerate(result_fields):
            p_field = peak_fields[i]
            if p_field not in ev_field:
                raise ValueError(
                    "Event fields must derive from the peak fields")
            result[ev_field] = largest_s_i[main_or_alt_index][p_field]

    @staticmethod
    # @numba.njit <- works but slows if fill_events is not numbafied
    def set_sx_index(res, s1_idx, s2_idx):
        if len(s1_idx):
            res['s1_index'] = s1_idx[0]
            if len(s1_idx) > 1:
                res['alt_s1_index'] = s1_idx[1]
        if len(s2_idx):
            res['s2_index'] = s2_idx[0]
            if len(s2_idx) > 1:
                res['alt_s2_index'] = s2_idx[1]
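The core of the main/alternate selection in get_largest_sx_peaks can be illustrated standalone (pure numpy, made-up values): the two largest peaks of a type become main and alternate, and with force_main_before_alt the earlier of the two is promoted to main.

import numpy as np

areas = np.array([200.0, 5000.0, 800.0])    # example S2 areas [PE]
times = np.array([1000, 3000, 2000])        # example S2 times [ns]
order = np.argsort(areas)[-2:][::-1]        # indices of the two largest, big first
main, alt = order
if times[alt] < times[main]:                # optional force_main_before_alt
    main, alt = alt, main
print(main, alt)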
Code example #14
class PeakAmbience(strax.OverlapWindowPlugin):
    """
    Calculate Ambience of peaks.
    Features are the number of lonehits, small S0, S1, S2 in a time window before peaks,
    and the number of small S2 in a circle near the S2 peak in a time window.
    References:
        * v0.0.7 reference: xenon:xenonnt:ac:prediction:shadow_ambience
    """
    __version__ = '0.0.7'
    depends_on = ('lone_hits', 'peak_basics', 'peak_positions')
    provides = 'peak_ambience'
    data_kind = 'peaks'
    save_when = strax.SaveWhen.EXPLICIT

    ambience_time_window_backward = straxen.URLConfig(
        default=int(2e6),
        type=int,
        track=True,
        help='Search for ambience in this time window [ns]')

    ambience_divide_t = straxen.URLConfig(
        default=False,
        type=bool,
        track=True,
        help=
        'Whether to divide area by time difference of ambience creating peak to current peak'
    )

    ambience_divide_r = straxen.URLConfig(
        default=False,
        type=bool,
        track=True,
        help=
        'Whether to divide area by radial distance of ambience creating peak to current peak'
    )

    ambient_radius = straxen.URLConfig(
        default=6.7,
        type=float,
        track=True,
        help='Search for ambience in this radius [cm]')

    ambience_area_parameters = straxen.URLConfig(
        default=(5, 60, 60),
        type=(list, tuple),
        track=True,
        help='The upper limit of S0, S1, S2 area to be counted')

    def get_window_size(self):
        return 10 * self.config['ambience_time_window_backward']

    @property
    def origin_dtype(self):
        return ['lh_before', 's0_before', 's1_before', 's2_before', 's2_near']

    def infer_dtype(self):
        dtype = []
        for ambience in self.origin_dtype:
            dtype += [
                ((f"Number of small {' '.join(ambience.split('_'))} a peak",
                  f'n_{ambience}'), np.int16),
                ((f"Area sum of small {' '.join(ambience.split('_'))} a peak",
                  f's_{ambience}'), np.float32)
            ]
        dtype += strax.time_fields
        return dtype

    def compute(self, lone_hits, peaks):
        return self.compute_ambience(lone_hits, peaks, peaks)

    def compute_ambience(self, lone_hits, peaks, current_peak):
        # 1. Initialization
        result = np.zeros(len(current_peak), self.dtype)

        # 2. Define time window for each peak, we will find small peaks & lone hits within these time windows
        roi = np.zeros(len(current_peak), dtype=strax.time_fields)
        roi['time'] = current_peak['center_time'] - self.config[
            'ambience_time_window_backward']
        roi['endtime'] = current_peak['center_time']

        # 3. Calculate number and area sum of lonehits before a peak
        touching_windows = strax.touching_windows(lone_hits, roi)
        # Calculating ambience
        self.lonehits_ambience(current_peak, lone_hits, touching_windows,
                               result['n_lh_before'], result['s_lh_before'],
                               self.config['ambience_divide_t'])

        # 4. Calculate number and area sum of small S0, S1, S2 before a peak
        radius = -1
        for stype, area in zip([0, 1, 2],
                               self.config['ambience_area_parameters']):
            mask_pre = (peaks['type'] == stype) & (peaks['area'] < area)
            touching_windows = strax.touching_windows(peaks[mask_pre], roi)
            # Calculating ambience
            self.peaks_ambience(current_peak, peaks[mask_pre],
                                touching_windows, radius,
                                result[f'n_s{stype}_before'],
                                result[f's_s{stype}_before'],
                                self.config['ambience_divide_t'],
                                self.config['ambience_divide_r'])

        # 5. Calculate number and area sum of small S2 near(in (x,y) space) a S2 peak
        mask_pre = (peaks['type'] == 2) & (
            peaks['area'] < self.config['ambience_area_parameters'][2])
        touching_windows = strax.touching_windows(peaks[mask_pre], roi)
        # Calculating ambience
        self.peaks_ambience(current_peak, peaks[mask_pre], touching_windows,
                            self.config['ambient_radius'], result['n_s2_near'],
                            result['s_s2_near'],
                            self.config['ambience_divide_t'],
                            self.config['ambience_divide_r'])

        # 6. Set time and endtime for peaks
        result['time'] = current_peak['time']
        result['endtime'] = strax.endtime(current_peak)
        return result

    @staticmethod
    @numba.njit
    def lonehits_ambience(peaks, pre_hits, touching_windows, num_array,
                          sum_array, ambience_divide_t):
        # Function to find lonehits before a peak
        # creating_hit is the lonehit creating ambience
        # suspicious_peak is the suspicious peak in the ambience created by creating_hit
        for p_i, suspicious_peak in enumerate(peaks):
            indices = touching_windows[p_i]
            for idx in range(indices[0], indices[1]):
                creating_hit = pre_hits[idx]
                dt = suspicious_peak['center_time'] - creating_hit['time']
                if (dt <= 0) or (creating_hit['area'] <= 0):
                    continue
                num_array[p_i] += 1
                # Sometimes we may be interested in the sum of area / dt
                if ambience_divide_t:
                    sum_array[p_i] += creating_hit['area'] / dt
                else:
                    sum_array[p_i] += creating_hit['area']

    @staticmethod
    @numba.njit
    def peaks_ambience(peaks, pre_peaks, touching_windows, ambient_radius,
                       num_array, sum_array, ambience_divide_t,
                       ambience_divide_r):
        # Function to find S0, S1, S2 before or near a peak
        # creating_peak is the peak creating ambience
        # suspicious_peak is the suspicious peak in the ambience created by creating_peak
        for p_i, suspicious_peak in enumerate(peaks):
            indices = touching_windows[p_i]
            for idx in range(indices[0], indices[1]):
                creating_peak = pre_peaks[idx]
                r = distance_in_xy(suspicious_peak, creating_peak)
                dt = suspicious_peak['center_time'] - creating_peak[
                    'center_time']
                if dt <= 0:
                    continue
                if (ambient_radius < 0) or (r <= ambient_radius):
                    num_array[p_i] += 1
                    # Sometimes we may be interested in the sum of area / dt
                    if ambience_divide_t:
                        sum_array[p_i] += creating_peak['area'] / dt
                    else:
                        sum_array[p_i] += creating_peak['area']
                    # Sometimes we may be interested in the sum of area / r^2
                    if ambience_divide_r and ambient_radius > 0:
                        sum_array[p_i] /= r**2
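The two numba helpers above accumulate, for each peak, the count and the (optionally dt-weighted, r²-scaled) area sum of preceding lone hits or small peaks found by strax.touching_windows. A minimal pure-Python sketch of the same time-window accumulation, using hypothetical toy arrays instead of strax dtypes:

import numpy as np

# Hypothetical inputs: center times of two "suspicious" peaks and the
# times/areas of earlier small peaks that may create ambience around them.
center_time = np.array([1000, 5000])          # ns
pre_time = np.array([200, 800, 4200, 4900])   # ns
pre_area = np.array([5.0, 3.0, 10.0, 2.0])    # PE
window = 1000                                  # look-back window [ns]

n_before = np.zeros(len(center_time), dtype=int)
s_before = np.zeros(len(center_time))

for p_i, ct in enumerate(center_time):
    in_window = (pre_time > ct - window) & (pre_time < ct)
    for t, a in zip(pre_time[in_window], pre_area[in_window]):
        dt = ct - t
        if dt <= 0 or a <= 0:
            continue
        n_before[p_i] += 1
        s_before[p_i] += a / dt   # ambience_divide_t=True: sum of area / dt

print(n_before, s_before)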
Code example #15
class IndividualPeakMonitor(strax.Plugin):
    """
    Plugin to write data needed for the online SE monitor to the
    online-monitor collection in the runs-database. Data written by
    this plugin should be small, so as not to overload the runs-database.
    If the peaks data exceeds max_bytes, a random subset of the peaks
    is selected.

    This plugin takes 'peak_basics' and 'peak_positions_mlp'. Although 
    they are not strictly related, they are aggregated into a single data_type
    in order to minimize the number of documents in the online monitor.

    Produces 'individual_peak_monitor' with info on the peaks and their
    positions.
    """

    online_max_bytes = straxen.URLConfig(
        default=10e6,
        track=True,
        help='Maximum amount of bytes of data for MongoDB document'
    )

    depends_on = ('peak_basics', 'peak_positions_mlp')
    provides = 'individual_peak_monitor'
    data_kind = 'individual_peak_monitor'
    __version__ = '0.0.1'

    def infer_dtype(self):
        dtype = [
            (('Peak integral in PE', 'area'),
             np.float32),
            (('Reconstructed mlp peak x-position', 'x_mlp'),
             np.float32),
            (('Reconstructed mlp peak y-position', 'y_mlp'),
             np.float32),
            (('Width (in ns) of the central 50% area of the peak', 'range_50p_area'),
             np.float32),
            (('Fraction of original peaks array length that is saved', 'weight'),
             np.float32),
        ] + strax.time_fields
        return dtype

    def compute(self, peaks):
        peaks_size = peaks.nbytes

        if peaks_size > self.online_max_bytes:
            # Calculate fraction of the data that can be kept
            # to reduce datasize
            new_len = int(len(peaks) / peaks_size * self.online_max_bytes)
            idx = np.random.choice(np.arange(len(peaks)), replace=False, size=new_len)
            data = peaks[np.sort(idx)]

        else:  # peaks_size <= self.online_max_bytes
            data = peaks
        res = np.zeros(len(data), dtype=self.dtype)
        res['time'] = data['time']
        res['x_mlp'] = data['x_mlp']
        res['y_mlp'] = data['y_mlp']
        res['area'] = data['area']
        res['range_50p_area'] = data['range_50p_area']
        res['endtime'] = data['endtime']

        if len(data):
            res['weight'] = len(peaks) / len(data)
        else:
            res['weight'] = 0

        return res
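The only non-trivial step above is the downsampling: when the peaks array exceeds online_max_bytes, a random subset is kept and the 'weight' field records how many original peaks each surviving row stands for. A small standalone sketch of that bookkeeping, with a toy array and a hypothetical byte budget:

import numpy as np

rng = np.random.default_rng(42)
peaks = np.zeros(10_000, dtype=[('time', np.int64), ('area', np.float32)])
max_bytes = 20_000   # hypothetical budget; the plugin default is 10e6

if peaks.nbytes > max_bytes:
    # Keep the same fraction of rows as of bytes, sampled without replacement
    new_len = int(len(peaks) / peaks.nbytes * max_bytes)
    idx = rng.choice(len(peaks), size=new_len, replace=False)
    data = peaks[np.sort(idx)]
else:
    data = peaks

# 'weight' lets downstream consumers recover rates from the sample:
# each kept peak stands in for len(peaks) / len(data) original peaks.
weight = len(peaks) / len(data) if len(data) else 0
print(len(data), weight)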
Code example #16
File: event_processing.py Project: jorana/straxen
class CorrectedAreas(strax.Plugin):
    """
    Plugin which applies light collection efficiency maps and electron
    life time to the data.

    Computes the cS1/cS2 for the main/alternative S1/S2, applying the
    electron lifetime correction to the S2s.

    Note:
        Please be aware that for both the main and alternative S1, the
        area is corrected according to the xy-position of the main S2.

        There are now 3 components of cS2: cs2_top, cs2_bottom and cs2.
        cs2_top and cs2_bottom are corrected by the corresponding maps,
        and cs2 is the sum of the two.
    """
    __version__ = '0.2.0'

    depends_on = ['event_basics', 'event_positions']

    # Descriptor configs
    elife = straxen.URLConfig(
        default='cmt://elife?version=ONLINE&run_id=plugin.run_id',
        help='electron lifetime in [ns]')

    # default posrec, used to determine which LCE map to use
    default_reconstruction_algorithm = straxen.URLConfig(
        default=DEFAULT_POSREC_ALGO,
        help="default reconstruction algorithm that provides (x,y)")
    s1_xyz_map = straxen.URLConfig(
        default='itp_map://resource://cmt://format://'
        's1_xyz_map_{algo}?version=ONLINE&run_id=plugin.run_id'
        '&fmt=json&algo=plugin.default_reconstruction_algorithm',
        cache=True)
    s2_xy_map = straxen.URLConfig(
        default='itp_map://resource://cmt://format://'
        's2_xy_map_{algo}?version=ONLINE&run_id=plugin.run_id'
        '&fmt=json&algo=plugin.default_reconstruction_algorithm',
        cache=True)

    # average SE gain for a given time period. default to the value of this run in ONLINE model
    # thus, by default, there will be no time-dependent correction according to se gain
    avg_se_gain = straxen.URLConfig(
        default='cmt://se_gain?version=ONLINE&run_id=plugin.run_id',
        help='Nominal single electron (SE) gain in PE / electron extracted. '
        'Data will be corrected to this value')

    # se gain for this run, allowing for using CMT. default to online
    se_gain = straxen.URLConfig(
        default='cmt://se_gain?version=ONLINE&run_id=plugin.run_id',
        help='Actual SE gain for a given run (allows for time dependence)')

    # relative extraction efficiency which can change with time and modeled by CMT.
    # defaults to no correction
    rel_extraction_eff = straxen.URLConfig(
        default=1.0,
        help=
        'Relative extraction efficiency for this run (allows for time dependence)'
    )

    def infer_dtype(self):
        dtype = []
        dtype += strax.time_fields

        for peak_type, peak_name in zip(['', 'alt_'], ['main', 'alternate']):
            dtype += [
                (f'{peak_type}cs1', np.float32,
                 f'Corrected area of {peak_name} S1 [PE]'),
                (f'{peak_type}cs2_wo_elifecorr', np.float32,
                 f'Corrected area of {peak_name} S2 before elife correction '
                 f'(s2 xy correction + SEG/EE correction applied) [PE]'),
                (f'{peak_type}cs2_wo_timecorr', np.float32,
                 f'Corrected area of {peak_name} S2 before SEG/EE and elife corrections '
                 f'(s2 xy correction applied) [PE]'),
                (f'{peak_type}cs2_area_fraction_top', np.float32,
                 f'Fraction of area seen by the top PMT array for corrected {peak_name} S2'
                 ),
                (f'{peak_type}cs2_bottom', np.float32,
                 f'Corrected area of {peak_name} S2 in the bottom PMT array [PE]'
                 ),
                (f'{peak_type}cs2', np.float32,
                 f'Corrected area of {peak_name} S2 [PE]'),
            ]
        return dtype

    def compute(self, events):
        result = dict(time=events['time'], endtime=strax.endtime(events))

        # S1 corrections depend on the actual corrected event position.
        # We use this also for the alternate S1; for e.g. Kr this is
        # fine as the S1 correction varies slowly.
        event_positions = np.vstack([events['x'], events['y'], events['z']]).T

        for peak_type in ["", "alt_"]:
            result[f"{peak_type}cs1"] = events[
                f'{peak_type}s1_area'] / self.s1_xyz_map(event_positions)

        # s2 corrections
        # S2 top and bottom are corrected separately, and cS2 total is the sum of the two
        # figure out the map name
        if len(self.s2_xy_map.map_names) > 1:
            s2_top_map_name = "map_top"
            s2_bottom_map_name = "map_bottom"
        else:
            s2_top_map_name = "map"
            s2_bottom_map_name = "map"

        for peak_type in ["", "alt_"]:
            # S2(x,y) corrections use the observed S2 positions
            s2_positions = np.vstack(
                [events[f'{peak_type}s2_x'], events[f'{peak_type}s2_y']]).T

            # corrected s2 with s2 xy map only, i.e. no elife correction
            # this is for s2-only events which don't have drift time info
            cs2_top_xycorr = (
                events[f'{peak_type}s2_area'] *
                events[f'{peak_type}s2_area_fraction_top'] /
                self.s2_xy_map(s2_positions, map_name=s2_top_map_name))
            cs2_bottom_xycorr = (
                events[f'{peak_type}s2_area'] *
                (1 - events[f'{peak_type}s2_area_fraction_top']) /
                self.s2_xy_map(s2_positions, map_name=s2_bottom_map_name))

            # Correct for SEgain and extraction efficiency
            seg_ee_corr = (self.se_gain /
                           self.avg_se_gain) * self.rel_extraction_eff
            cs2_top_wo_elifecorr = cs2_top_xycorr / seg_ee_corr
            cs2_bottom_wo_elifecorr = cs2_bottom_xycorr / seg_ee_corr
            result[
                f"{peak_type}cs2_wo_elifecorr"] = cs2_top_wo_elifecorr + cs2_bottom_wo_elifecorr

            # cs2aft doesn't need elife/time corrections as they cancel
            result[
                f"{peak_type}cs2_area_fraction_top"] = cs2_top_wo_elifecorr / result[
                    f"{peak_type}cs2_wo_elifecorr"]

            # For electron lifetime corrections to the S2s,
            # use drift time computed using the main S1.
            el_string = peak_type + "s2_interaction_" if peak_type == "alt_" else peak_type
            elife_correction = np.exp(events[f'{el_string}drift_time'] /
                                      self.elife)
            result[f"{peak_type}cs2_wo_timecorr"] = (
                cs2_top_xycorr + cs2_bottom_xycorr) * elife_correction
            result[f"{peak_type}cs2"] = result[
                f"{peak_type}cs2_wo_elifecorr"] * elife_correction
            result[
                f"{peak_type}cs2_bottom"] = cs2_bottom_wo_elifecorr * elife_correction

        return result
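In compact form, the correction chain applied above is cS1 = S1 / LCE(x, y, z), and for each S2 array side cS2_side = S2·AFT_side / map_side(x, y) / (SE_gain / avg_SE_gain · rel_EE), with the electron-lifetime factor exp(drift_time / elife) applied at the end. A scalar sketch with made-up numbers (all values hypothetical):

import numpy as np

# Hypothetical inputs for one event
s1_area, s2_area, aft = 120.0, 4500.0, 0.75          # PE, PE, area fraction top
s1_lce, s2_xy_top, s2_xy_bottom = 0.95, 1.05, 0.90    # relative map values
se_gain, avg_se_gain, rel_ee = 30.5, 31.0, 1.0        # SEG/EE correction inputs
drift_time, elife = 1.2e6, 8.0e6                      # ns

cs1 = s1_area / s1_lce

seg_ee_corr = (se_gain / avg_se_gain) * rel_ee
cs2_top_wo_elife = s2_area * aft / s2_xy_top / seg_ee_corr
cs2_bottom_wo_elife = s2_area * (1 - aft) / s2_xy_bottom / seg_ee_corr

elife_correction = np.exp(drift_time / elife)
cs2 = (cs2_top_wo_elife + cs2_bottom_wo_elife) * elife_correction
cs2_aft = cs2_top_wo_elife / (cs2_top_wo_elife + cs2_bottom_wo_elife)

print(cs1, cs2, cs2_aft)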
Code example #17
File: event_processing.py Project: jorana/straxen
class EventPositions(strax.Plugin):
    """
    Computes the observed and corrected position for the main S1/S2
    pairs in an event. For XENONnT data, it returns the FDC corrected
    positions of the default_reconstruction_algorithm. In case the fdc_map
    is given as a file (not through CMT), then the coordinate system
    should be given as (x, y, z), not (x, y, drift_time).
    """

    depends_on = ('event_basics', )

    __version__ = '0.1.4'

    default_reconstruction_algorithm = straxen.URLConfig(
        default=DEFAULT_POSREC_ALGO,
        help="default reconstruction algorithm that provides (x,y)")

    dtype = [
        ('x', np.float32,
         'Interaction x-position, field-distortion corrected (cm)'),
        ('y', np.float32,
         'Interaction y-position, field-distortion corrected (cm)'),
        ('z', np.float32,
         'Interaction z-position, using mean drift velocity only (cm)'),
        ('r', np.float32,
         'Interaction radial position, field-distortion corrected (cm)'),
        ('z_naive', np.float32,
         'Interaction z-position using mean drift velocity only (cm)'),
        ('r_naive', np.float32,
         'Interaction r-position using observed S2 positions directly (cm)'),
        ('r_field_distortion_correction', np.float32,
         'Correction added to r_naive for field distortion (cm)'),
        ('z_field_distortion_correction', np.float32,
         'Correction added to z_naive for field distortion (cm)'),
        ('theta', np.float32, 'Interaction angular position (radians)')
    ] + strax.time_fields

    def setup(self):

        self.electron_drift_velocity = get_correction_from_cmt(
            self.run_id, self.config['electron_drift_velocity'])
        self.electron_drift_time_gate = get_correction_from_cmt(
            self.run_id, self.config['electron_drift_time_gate'])

        if isinstance(self.config['fdc_map'], str):
            self.map = InterpolatingMap(
                get_resource(self.config['fdc_map'], fmt='binary'))

        elif is_cmt_option(self.config['fdc_map']):
            self.map = InterpolatingMap(
                get_cmt_resource(
                    self.run_id,
                    tuple([
                        'suffix',
                        self.config['default_reconstruction_algorithm'],
                        *self.config['fdc_map']
                    ]),
                    fmt='binary'))
            self.map.scale_coordinates([1., 1., -self.electron_drift_velocity])

        else:
            raise NotImplementedError('FDC map format not understood.')

    def compute(self, events):

        result = {'time': events['time'], 'endtime': strax.endtime(events)}

        z_obs = -self.electron_drift_velocity * (events['drift_time'] -
                                                 self.electron_drift_time_gate)
        orig_pos = np.vstack([events['s2_x'], events['s2_y'], z_obs]).T
        r_obs = np.linalg.norm(orig_pos[:, :2], axis=1)
        delta_r = self.map(orig_pos)

        # apply radial correction
        with np.errstate(invalid='ignore', divide='ignore'):
            r_cor = r_obs + delta_r
            scale = r_cor / r_obs

        # z correction due to longer drift time for distortion
        # (geometrical reasoning not valid if |delta_r| > |z_obs|,
        #  as cathetus cannot be longer than hypotenuse)
        with np.errstate(invalid='ignore'):
            z_cor = -(z_obs**2 - delta_r**2)**0.5
            invalid = np.abs(z_obs) < np.abs(delta_r)
            # do not apply z correction above gate
            invalid |= z_obs >= 0
        z_cor[invalid] = z_obs[invalid]
        delta_z = z_cor - z_obs

        result.update({
            'x': orig_pos[:, 0] * scale,
            'y': orig_pos[:, 1] * scale,
            'r': r_cor,
            'r_naive': r_obs,
            'r_field_distortion_correction': delta_r,
            'theta': np.arctan2(orig_pos[:, 1], orig_pos[:, 0]),
            'z_naive': z_obs,
            # using z_obs in agreement with the dtype description
            # the FDC for z (z_cor) is found to be not reliable (see #527)
            'z': z_obs,
            'z_field_distortion_correction': delta_z
        })

        return result
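The geometry used above: the observed radius is shifted by the map value, r_cor = r_obs + Δr, the (x, y) position is rescaled by r_cor / r_obs, and the z correction would be z_cor = -sqrt(z_obs² - Δr²) where defined (although, per the comment, z is kept at z_obs in the output). A toy numeric sketch of the radial part, with hypothetical values:

import numpy as np

# Hypothetical observed S2 position and drift-derived depth
x_obs, y_obs, z_obs = 30.0, 40.0, -50.0   # cm
delta_r = 1.5                             # cm, value an FDC map would return

r_obs = np.hypot(x_obs, y_obs)            # 50.0 cm
r_cor = r_obs + delta_r
scale = r_cor / r_obs

x_cor, y_cor = x_obs * scale, y_obs * scale
# z correction is only geometrically valid when |delta_r| <= |z_obs| and the event is below the gate
z_cor = -np.sqrt(z_obs**2 - delta_r**2) if abs(delta_r) <= abs(z_obs) and z_obs < 0 else z_obs

print(x_cor, y_cor, r_cor, z_cor)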
Code example #18
File: veto_events.py Project: XENONnT/straxen
class nVETOEvents(strax.OverlapWindowPlugin):
    """
    Plugin which computes the boundaries of veto events.
    """
    depends_on = 'hitlets_nv'
    provides = 'events_nv'
    data_kind = 'events_nv'
    compressor = 'zstd'

    __version__ = '0.0.3'
    events_seen = 0

    event_left_extension_nv = straxen.URLConfig(
        default=0,
        track=True,
        type=int,
        help='Extends event window this many [ns] to the left.')
    event_resolving_time_nv = straxen.URLConfig(
        default=200,
        track=True,
        type=int,
        help='Resolving time for window coincidence [ns].')
    event_min_hits_nv = straxen.URLConfig(
        default=3,
        track=True,
        type=int,
        help='Minimum number of fully confined hitlets to define an event.')
    channel_map = straxen.URLConfig(
        track=False,
        type=immutabledict,
        help='immutabledict mapping subdetector to (min, max) channel number')

    def infer_dtype(self):
        self.name_event_number = 'event_number_nv'
        self.channel_range = self.config['channel_map']['nveto']
        self.n_channel = (self.channel_range[1] - self.channel_range[0]) + 1
        return veto_event_dtype(self.name_event_number, self.n_channel)

    def get_window_size(self):
        return self.config['event_left_extension_nv'] + self.config[
            'event_resolving_time_nv'] + 1

    def compute(self, hitlets_nv, start, end):

        events, hitlets_ids_in_event = find_veto_events(
            hitlets_nv,
            self.config['event_min_hits_nv'],
            self.config['event_resolving_time_nv'],
            self.config['event_left_extension_nv'],
            event_number_key=self.name_event_number,
            n_channel=self.n_channel,
        )

        if len(hitlets_ids_in_event):
            compute_nveto_event_properties(events,
                                           hitlets_nv,
                                           hitlets_ids_in_event,
                                           start_channel=self.channel_range[0])

        # Get event ids:
        n_events = len(events)
        events[self.name_event_number] = np.arange(n_events) + self.events_seen
        self.events_seen += n_events

        # Don't extend beyond the chunk boundaries
        # This will often happen for events near the invalid boundary of the
        # overlap processing (which should be thrown away)
        events['time'] = np.clip(events['time'], start, end)
        events['endtime'] = np.clip(events['endtime'], start, end)
        return events
Code example #19
File: acqmon_processing.py Project: XENONnT/straxen
class VetoProximity(strax.OverlapWindowPlugin):
    """
    Find the closest next/previous veto start w.r.t. the event time or
    when a busy happens during an event.
    """

    __version__ = '2.1.0'
    # Strictly speaking, we could depend on 'events', but then you couldn't
    # change the event_window_fields to e.g. s1_time and s2_endtime.
    depends_on = ('event_basics', 'veto_intervals')
    provides = 'veto_proximity'
    data_kind = 'events'

    event_window_fields = straxen.URLConfig(
        default=('time', 'endtime'),
        help='Fields of the events used to define the window in which to '
        'look for overlaps. The default uses the start and endtime '
        'of an event, but this can also be the S1 or S2 start/endtime')

    veto_proximity_window = straxen.URLConfig(
        default=int(300e9),
        help='Maximum separation between veto stop and start pulses [ns]')
    time_no_aqmon_veto_found = straxen.URLConfig(
        default=int(3.6e+12),
        track=True,
        type=int,
        help='If no next/previous veto is found, we will fill the fields '
        'time_to_previous_XX with this time. Set to a large number '
        'such that one will never cut events that are < YY ns.')

    veto_names = ['busy', 'busy_he', 'hev', 'straxen_deadtime']

    def infer_dtype(self):
        dtype = []
        dtype += strax.time_fields
        start_field, stop_field = self.event_window_fields
        for name in self.veto_names:
            dtype += [
                ((f'Duration of event overlapping with "{name}"-veto [ns]',
                  f'veto_{name}_overlap'), np.int64),
                ((f'Time (absolute value) to previous "{name}"-veto '
                  f'from "{start_field}" of event [ns]',
                  f'time_to_previous_{name}'), np.int64),
                ((f'Time (absolute value) to next "{name}"-veto '
                  f'from "{stop_field}" of event [ns]',
                  f'time_to_next_{name}'), np.int64),
            ]

        return dtype

    def get_window_size(self):
        return self.veto_proximity_window

    def set_result_for_veto(self, result_buffer: np.ndarray,
                            event_window: np.ndarray,
                            veto_intervals: np.ndarray,
                            veto_name: str) -> None:
        """
        Fill the result buffer inplace. Goal is to find vetos with
        <veto_name> that are either during, before or after the
         current event_window.

        :param result_buffer: The buffer to fill inplace
        :param event_window: start/stop boundaries of the event to consider.
            Should be an array with ['time'] and ['endtime'] which can be
            based on event start/end times or S1/S2 times
        :param veto_intervals: veto intervals datatype
        :param veto_name: The name of the veto to fill the result buffer for
        :return: Nothing, results are filled in place
        """
        # Set defaults to be some very long time
        result_buffer[
            f'time_to_previous_{veto_name}'] = self.time_no_aqmon_veto_found
        result_buffer[
            f'time_to_next_{veto_name}'] = self.time_no_aqmon_veto_found

        selected_intervals = veto_intervals[veto_intervals['veto_type'] ==
                                            f'{veto_name}_veto']
        if not len(selected_intervals):
            return

        vetos_during_event = strax.touching_windows(selected_intervals,
                                                    event_window)

        # Figure out the vetos *during* an event
        for event_i, veto_window in enumerate(vetos_during_event):
            if veto_window[1] - veto_window[0]:
                vetos_in_window = selected_intervals[
                    veto_window[0]:veto_window[1]].copy()
                starts = np.clip(vetos_in_window['time'],
                                 event_window[event_i]['time'],
                                 event_window[event_i]['endtime'])
                stops = np.clip(vetos_in_window['endtime'],
                                event_window[event_i]['time'],
                                event_window[event_i]['endtime'])
                # Now sum over all the stops-starts that are clipped
                # within the duration of the event
                result_buffer[event_i][f'veto_{veto_name}_overlap'] = np.sum(
                    stops - starts)

        # Find the next and previous vetos
        times_to_prev, times_to_next = self.abs_time_to_prev_next(
            event_window, selected_intervals)
        mask_prev = times_to_prev > 0
        result_buffer[f'time_to_previous_{veto_name}'][
            mask_prev] = times_to_prev[mask_prev]

        mask_next = times_to_next > 0
        result_buffer[f'time_to_next_{veto_name}'][mask_next] = times_to_next[
            mask_next]

    @staticmethod
    @numba.njit
    def abs_time_to_prev_next(event_window, selected_intervals):
        """Get the absolute time to the previous and the next interval"""
        times_to_prev = np.ones(len(event_window)) * -1
        times_to_next = np.ones(len(event_window)) * -1
        for event_i, ev_wind in enumerate(event_window):
            # Two cases left: either vetos are before or after the event window
            interval_before = selected_intervals['endtime'] < ev_wind['time']
            interval_after = selected_intervals['time'] > ev_wind['endtime']

            if np.sum(interval_before):
                prev_intervals = selected_intervals[interval_before]
                time_to_prev = np.abs(ev_wind['time'] -
                                      prev_intervals['endtime'])
                prev_idx = np.argmin(time_to_prev)
                times_to_prev[event_i] = time_to_prev[prev_idx]

            if np.sum(interval_after):
                next_intervals = selected_intervals[interval_after]
                time_to_next = np.abs(next_intervals['endtime'] -
                                      ev_wind['endtime'])
                next_idx = np.argmin(time_to_next)
                times_to_next[event_i] = time_to_next[next_idx]
        return times_to_prev, times_to_next

    def compute(self, events, veto_intervals):
        result = np.zeros(len(events), self.dtype)
        result['time'] = events['time']
        result['endtime'] = events['endtime']

        # Get containers for touching windows based on self.event_window_fields
        event_window = np.zeros(len(events), dtype=strax.time_fields)
        event_window['time'] = events[self.event_window_fields[0]]
        event_window['endtime'] = events[self.event_window_fields[1]]

        for veto_name in self.veto_names:
            self.set_result_for_veto(result, event_window, veto_intervals,
                                     veto_name)
        return result
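The overlap computation in set_result_for_veto boils down to clipping each veto interval to the event window and summing the clipped durations. A minimal sketch on hypothetical plain arrays:

import numpy as np

# Hypothetical event window and veto intervals [ns]
event_start, event_end = 1_000, 2_000
veto_start = np.array([800, 1_500, 2_500])
veto_end = np.array([1_200, 1_700, 2_600])

starts = np.clip(veto_start, event_start, event_end)
stops = np.clip(veto_end, event_start, event_end)
overlap = np.sum(stops - starts)   # 200 + 200 + 0 = 400 ns inside the event
print(overlap)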
Code example #20
class PeakShadow(strax.OverlapWindowPlugin):
    """
    This plugin can find and calculate the time & position shadow
    from previous peaks in time.
    It also gives the area and (x,y) of the previous peaks.
    References:
        * v0.1.5 reference: xenon:xenonnt:ac:prediction:shadow_ambience
    """

    __version__ = '0.1.5'
    depends_on = ('peak_basics', 'peak_positions')
    provides = 'peak_shadow'
    save_when = strax.SaveWhen.EXPLICIT

    shadow_time_window_backward = straxen.URLConfig(
        default=int(1e9),
        type=int,
        track=True,
        help=
        'Search for peaks casting time & position shadow in this time window [ns]'
    )

    shadow_threshold = straxen.URLConfig(
        default={
            's1_time_shadow': 1e3,
            's2_time_shadow': 1e4,
            's2_position_shadow': 1e4
        },
        type=dict,
        track=True,
        help=
        'Only take S1/S2s larger than this into account when calculating Shadow [PE]'
    )

    shadow_deltatime_exponent = straxen.URLConfig(
        default=-1.0,
        type=float,
        track=True,
        help='The exponent of delta t when calculating shadow')

    shadow_sigma_and_baseline = straxen.URLConfig(
        default=[15.220, 0.036],
        type=list,
        track=True,
        help=
        'Fitted position correlation sigma [cm*PE^0.5] and baseline [cm] used in position shadow'
    )

    def get_window_size(self):
        return 10 * self.config['shadow_time_window_backward']

    def infer_dtype(self):
        s1_time_shadow_dtype = []
        s2_time_shadow_dtype = []
        s2_position_shadow_dtype = []
        nearest_dtype = []
        # We have time shadow(S2/dt) and position shadow(S2/dt*p(s))
        # previous S1 can only cast time shadow, previous S2 can cast both time & position shadow
        for key, dtype in zip(
            ['s1_time_shadow', 's2_time_shadow', 's2_position_shadow'], [
                s1_time_shadow_dtype, s2_time_shadow_dtype,
                s2_position_shadow_dtype
            ]):
            type_str, tp_desc, _ = key.split('_')
            dtype.append(((
                f'previous large {type_str} casting largest {tp_desc} shadow [PE/ns]',
                f'shadow_{key}'), np.float32))
            dtype.append(((
                f'time difference to the previous large {type_str} peak casting largest {tp_desc} shadow [ns]',
                f'dt_{key}'), np.int64))
            # Only previous S2 peaks have (x,y)
            if 's2' in key:
                dtype.append(((
                    f'x of previous large s2 peak casting largest {tp_desc} shadow [cm]',
                    f'x_{key}'), np.float32))
                dtype.append(((
                    f'y of previous large s2 peak casting largest {tp_desc} shadow [cm]',
                    f'y_{key}'), np.float32))
            # Only time shadow gives the nearest large peak
            if 'time' in key:
                dtype.append(((
                    f'time difference to the nearest previous large {type_str} [ns]',
                    f'nearest_dt_{type_str}'), np.int64))
        # Also record the PDF of HalfCauchy when calculating S2 position shadow
        s2_position_shadow_dtype.append(
            (('PDF describing correlation to the previous large s2',
              'pdf_s2_position_shadow'), np.float32))

        dtype = s1_time_shadow_dtype + s2_time_shadow_dtype + s2_position_shadow_dtype + nearest_dtype + strax.time_fields
        return dtype

    @property
    def shadowdtype(self):
        dtype = []
        dtype += [('shadow', np.float32), ('dt', np.int64)]
        dtype += [('x', np.float32), ('y', np.float32)]
        dtype += [('nearest_dt', np.int64)]
        return dtype

    def compute(self, peaks):
        return self.compute_shadow(peaks, peaks)

    def compute_shadow(self, peaks, current_peak):
        # 1. Define time window for each peak, we will find previous peaks within these time windows
        roi_shadow = np.zeros(len(current_peak), dtype=strax.time_fields)
        roi_shadow['time'] = current_peak['center_time'] - self.config[
            'shadow_time_window_backward']
        roi_shadow['endtime'] = current_peak['center_time']

        # 2. Calculate S2 position shadow, S2 time shadow, and S1 time shadow
        result = np.zeros(len(current_peak), self.dtype)
        for key in ['s2_position_shadow', 's2_time_shadow', 's1_time_shadow']:
            is_position = 'position' in key
            type_str = key.split('_')[0]
            stype = 2 if 's2' in key else 1
            mask_pre = (peaks['type'] == stype) & (
                peaks['area'] > self.config['shadow_threshold'][key])
            split_peaks = strax.touching_windows(peaks[mask_pre], roi_shadow)
            array = np.zeros(len(current_peak), np.dtype(self.shadowdtype))

            # Initialization
            array['x'] = np.nan
            array['y'] = np.nan
            array['dt'] = self.config['shadow_time_window_backward']
            # The default value for shadow is set to be the lowest possible value
            if 'time' in key:
                array['shadow'] = self.config['shadow_threshold'][key] * array[
                    'dt']**self.config['shadow_deltatime_exponent']
            else:
                array['shadow'] = 0
            array['nearest_dt'] = self.config['shadow_time_window_backward']

            # Calculating the shadow, the core of this plugin. Only record the previous peak casting the largest shadow
            if len(current_peak):
                self.peaks_shadow(
                    current_peak, peaks[mask_pre], split_peaks,
                    self.config['shadow_deltatime_exponent'], array,
                    is_position,
                    self.getsigma(self.config['shadow_sigma_and_baseline'],
                                  current_peak['area']))

            # Fill results
            names = ['shadow', 'dt']
            if 's2' in key:  # Only previous S2 peaks have (x,y)
                names += ['x', 'y']
            if 'time' in key:  # Only time shadow gives the nearest large peak
                names += ['nearest_dt']
            for name in names:
                if name == 'nearest_dt':
                    result[f'{name}_{type_str}'] = array[name]
                else:
                    result[f'{name}_{key}'] = array[name]

        distance = np.sqrt(
            (result['x_s2_position_shadow'] - current_peak['x'])**2 +
            (result['y_s2_position_shadow'] - current_peak['y'])**2)
        # If distance is NaN, set largest distance
        distance = np.where(np.isnan(distance), 2 * straxen.tpc_r, distance)
        # HalfCauchy PDF when calculating S2 position shadow
        result['pdf_s2_position_shadow'] = halfcauchy.pdf(
            distance,
            scale=self.getsigma(self.config['shadow_sigma_and_baseline'],
                                current_peak['area']))

        # 6. Set time and endtime for peaks
        result['time'] = current_peak['time']
        result['endtime'] = strax.endtime(current_peak)
        return result

    @staticmethod
    @np.errstate(invalid='ignore')
    def getsigma(sigma_and_baseline, s2):
        # The parameter of HalfCauchy, which is a function of S2 area
        return sigma_and_baseline[0] / np.sqrt(s2) + sigma_and_baseline[1]

    @staticmethod
    @numba.njit
    def peaks_shadow(peaks,
                     pre_peaks,
                     touching_windows,
                     exponent,
                     result,
                     pos_corr,
                     sigmas=None):
        """
        For each peak in peaks, check if there is a shadow-casting peak
        and check if it casts the largest shadow
        """
        for p_i, (suspicious_peak, sigma) in enumerate(zip(peaks, sigmas)):
            # casting_peak is the previous large peak casting shadow
            # suspicious_peak is the suspicious peak which is in the shadow cast by casting_peak
            indices = touching_windows[p_i]
            for idx in range(indices[0], indices[1]):
                casting_peak = pre_peaks[idx]
                dt = suspicious_peak['center_time'] - casting_peak[
                    'center_time']
                if dt <= 0:
                    continue
                # First we record the time difference to the nearest previous peak
                result['nearest_dt'][p_i] = min(result['nearest_dt'][p_i], dt)
                # Calculate time shadow
                new_shadow = casting_peak['area'] * dt**exponent
                if pos_corr:
                    # Calculate position shadow which is time shadow with a HalfCauchy PDF multiplier
                    distance = distance_in_xy(suspicious_peak, casting_peak)
                    distance = np.where(np.isnan(distance), 2 * straxen.tpc_r,
                                        distance)
                    new_shadow *= 2 / (np.pi * sigma * (1 +
                                                        (distance / sigma)**2))
                # Only the previous peak with largest shadow is recorded
                if new_shadow > result['shadow'][p_i]:
                    result['shadow'][p_i] = new_shadow
                    result['x'][p_i] = casting_peak['x']
                    result['y'][p_i] = casting_peak['y']
                    result['dt'][p_i] = suspicious_peak[
                        'center_time'] - casting_peak['center_time']
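Written out, the time shadow of a previous peak is area · Δt^exponent (the exponent defaults to -1), and the position shadow multiplies this by a half-Cauchy PDF in the (x, y) distance, with σ depending on the current peak's area as σ = p0 / sqrt(S2) + p1. A scalar sketch with hypothetical numbers:

import numpy as np
from scipy.stats import halfcauchy

# Hypothetical previous peak and current peak
prev_area = 2.0e4          # PE
dt = 5.0e5                 # ns since the previous peak
exponent = -1.0
distance = 3.0             # cm between the two peaks in (x, y)
current_area = 1.0e3       # PE of the current (suspicious) peak
p0, p1 = 15.220, 0.036     # shadow_sigma_and_baseline defaults

time_shadow = prev_area * dt**exponent           # PE / ns
sigma = p0 / np.sqrt(current_area) + p1
position_shadow = time_shadow * halfcauchy.pdf(distance, scale=sigma)
print(time_shadow, position_shadow)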
Code example #21
File: acqmon_processing.py Project: XENONnT/straxen
class AqmonHits(strax.Plugin):
    """
    Find hits in acquisition monitor data. These hits could be
    then used by other plugins for deadtime calculations,
    GPS SYNC analysis, etc.
    """
    save_when = strax.SaveWhen.TARGET
    __version__ = '1.1.2'
    hit_min_amplitude_aqmon = straxen.URLConfig(
        default=(
            # Analogue signals
            (50, (int(AqmonChannels.SUM_WF), )),
            # Digital signals, can set a much higher threshold
            (1500, (
                int(AqmonChannels.MV_TRIGGER),
                int(AqmonChannels.GPS_SYNC),
                int(AqmonChannels.GPS_SYNC_AM),
                int(AqmonChannels.HEV_STOP),
                int(AqmonChannels.HEV_START),
                int(AqmonChannels.BUSY_HE_STOP),
                int(AqmonChannels.BUSY_HE_START),
                int(AqmonChannels.BUSY_STOP),
                int(AqmonChannels.BUSY_START),
            )),
            # Fake signals, 0 meaning that we won't find hits using
            # strax but just look for starts and stops
            (0, (int(AqmonChannels.ARTIFICIAL_DEADTIME), )),
        ),
        track=True,
        help='Minimum hit threshold in ADC*counts above baseline. Specified '
        'per channel in the format (threshold, (chx,chy),)',
    )
    baseline_samples_aqmon = straxen.URLConfig(
        default=10,
        track=True,
        help=
        'Number of samples to use at the start of the pulse to determine the baseline'
    )
    check_raw_record_aqmon_overlaps = straxen.URLConfig(
        default=True,
        track=False,
        help=
        'Crash if any of the pulses in raw_records_aqmon overlap with others '
        'in the same channel')

    depends_on = 'raw_records_aqmon'
    provides = 'aqmon_hits'
    data_kind = 'aqmon_hits'

    dtype = strax.hit_dtype

    def compute(self, raw_records_aqmon):
        not_allowed_channels = (set(np.unique(raw_records_aqmon['channel'])) -
                                set(self.aqmon_channels))
        if not_allowed_channels:
            raise ValueError(
                f'Unknown channel {not_allowed_channels}. Only know {self.aqmon_channels}'
            )

        if self.check_raw_record_aqmon_overlaps:
            straxen.check_overlaps(raw_records_aqmon,
                                   n_channels=max(AqmonChannels).value + 1)

        records = strax.raw_to_records(raw_records_aqmon)
        strax.zero_out_of_bounds(records)
        strax.baseline(records,
                       baseline_samples=self.baseline_samples_aqmon,
                       flip=True)
        aqmon_hits = self.find_aqmon_hits_per_channel(records)
        aqmon_hits = strax.sort_by_time(aqmon_hits)
        return aqmon_hits

    @property
    def aqmon_channels(self):
        return [
            channel for hit_and_channel_list in self.hit_min_amplitude_aqmon
            for channel in hit_and_channel_list[1]
        ]

    def find_aqmon_hits_per_channel(self, records):
        """Allow different thresholds to be applied to different channels"""
        aqmon_thresholds = np.zeros(np.max(self.aqmon_channels) + 1)
        for hit_threshold, channels in self.hit_min_amplitude_aqmon:
            aqmon_thresholds[np.array(channels)] = hit_threshold

        # Split the artificial deadtime ones and do those separately if there are any
        is_artificial = records['channel'] == AqmonChannels.ARTIFICIAL_DEADTIME
        aqmon_hits = strax.find_hits(records[~is_artificial],
                                     min_amplitude=aqmon_thresholds)

        if np.sum(is_artificial):
            aqmon_hits = np.concatenate(
                [aqmon_hits,
                 self.get_deadtime_hits(records[is_artificial])])
        return aqmon_hits

    def get_deadtime_hits(self, artificial_deadtime):
        """
        Actually, the artificial deadtime hits are already an interval so
        we only have to copy the appropriate hits
        """
        hits = np.zeros(len(artificial_deadtime), dtype=self.dtype)
        hits['time'] = artificial_deadtime['time']
        hits['dt'] = artificial_deadtime['dt']
        hits['length'] = artificial_deadtime['length']
        hits['channel'] = artificial_deadtime['channel']
        return hits
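The per-channel threshold bookkeeping in find_aqmon_hits_per_channel simply expands the (threshold, (channels,)) tuples into an array indexed by channel number. A toy sketch with hypothetical channel numbers standing in for the AqmonChannels enum:

import numpy as np

# Hypothetical (threshold, channels) configuration
hit_min_amplitude = (
    (50, (790,)),             # analogue channel
    (1500, (791, 792, 793)),  # digital channels
    (0, (799,)),              # artificial deadtime, handled separately
)

channels = [ch for _, chs in hit_min_amplitude for ch in chs]
thresholds = np.zeros(max(channels) + 1)
for threshold, chs in hit_min_amplitude:
    thresholds[np.array(chs)] = threshold

print(thresholds[790], thresholds[792], thresholds[799])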
Code example #22
class EventPatternFit(strax.Plugin):
    '''
    Plugin that provides pattern information for events
    '''

    depends_on = ('event_area_per_channel', 'event_basics', 'event_positions')
    provides = 'event_pattern_fit'
    __version__ = '0.1.3'

    # Getting S1 AFT maps
    s1_aft_map = straxen.URLConfig(
        default='itp_map://resource://cmt://'
        's1_aft_xyz_map'
        '?version=ONLINE&run_id=plugin.run_id&fmt=json',
        cache=True)

    electron_drift_velocity = straxen.URLConfig(
        default='cmt://'
        'electron_drift_velocity'
        '?version=ONLINE&run_id=plugin.run_id',
        cache=True,
        help='Vertical electron drift velocity in cm/ns (1e4 m/ms)')

    electron_drift_time_gate = straxen.URLConfig(
        default='cmt://'
        'electron_drift_time_gate'
        '?version=ONLINE&run_id=plugin.run_id',
        help='Electron drift time from the gate in ns',
        cache=True)

    def infer_dtype(self):
        dtype = [
            ('s2_2llh', np.float32,
             'Modified Poisson likelihood value for main S2 in the event'),
            ('s2_neural_2llh', np.float32,
             'Data-driven based likelihood value for main S2 in the event'),
            ('alt_s2_2llh', np.float32,
             'Modified Poisson likelihood value for alternative S2'),
            ('alt_s2_neural_2llh', np.float32,
             'Data-driven based likelihood value for alternative S2 in the event'
             ),
            ('s1_2llh', np.float32,
             'Modified Poisson likelihood value for main S1'),
            ('s1_top_2llh', np.float32,
             'Modified Poisson likelihood value for main S1, calculated from top array'
             ),
            ('s1_bottom_2llh', np.float32,
             'Modified Poisson likelihood value for main S1, calculated from bottom array'
             ),
            ('s1_area_fraction_top_continuous_probability', np.float32,
             'Continuous binomial test for S1 area fraction top'),
            ('s1_area_fraction_top_discrete_probability', np.float32,
             'Discrete binomial test for S1 area fraction top'),
            ('s1_photon_fraction_top_continuous_probability', np.float32,
             'Continuous binomial test for S1 photon fraction top'),
            ('s1_photon_fraction_top_discrete_probability', np.float32,
             'Discrete binomial test for S1 photon fraction top'),
            ('alt_s1_area_fraction_top_continuous_probability', np.float32,
             'Continuous binomial test for alternative S1 area fraction top'),
            ('alt_s1_area_fraction_top_discrete_probability', np.float32,
             'Discrete binomial test for alternative S1 area fraction top'),
            ('alt_s1_photon_fraction_top_continuous_probability', np.float32,
             'Continuous binomial test for alternative S1 photon fraction top'
             ),
            ('alt_s1_photon_fraction_top_discrete_probability', np.float32,
             'Discrete binomial test for alternative S1 photon fraction top')
        ]

        if self.config['store_per_channel']:
            dtype += [
                (('2LLH per channel for main S2', 's2_2llh_per_channel'),
                 np.float32, (self.config['n_top_pmts'], )),
                (('2LLH per channel for alternative S2',
                  'alt_s2_2llh_per_channel'), np.float32,
                 (self.config['n_top_pmts'], )),
                (('Pattern main S2', 's2_pattern'), np.float32,
                 (self.config['n_top_pmts'], )),
                (('Pattern alt S2', 'alt_s2_pattern'), np.float32,
                 (self.config['n_top_pmts'], )),
                (('Pattern for main S1', 's1_pattern'), np.float32,
                 (self.config['n_tpc_pmts'], )),
                (('2LLH per channel for main S1', 's1_2llh_per_channel'),
                 np.float32, (self.config['n_tpc_pmts'], )),
            ]
        dtype += strax.time_fields
        return dtype

    def setup(self):
        self.mean_pe_photon = self.config['mean_pe_per_photon']

        # Getting optical maps
        self.s1_pattern_map = straxen.InterpolatingMap(
            straxen.get_resource(self.config['s1_optical_map'],
                                 fmt=self._infer_map_format(
                                     self.config['s1_optical_map'])))
        self.s2_pattern_map = straxen.InterpolatingMap(
            straxen.get_resource(self.config['s2_optical_map'],
                                 fmt=self._infer_map_format(
                                     self.config['s2_optical_map'])))

        # Getting S2 data-driven tensorflow models
        downloader = straxen.MongoDownloader()
        self.model_file = downloader.download_single(
            self.config['s2_tf_model'])
        with tempfile.TemporaryDirectory() as tmpdirname:
            tar = tarfile.open(self.model_file, mode="r:gz")
            tar.extractall(path=tmpdirname)

            import tensorflow as tf

            def _logl_loss(patterns_true, likelihood):
                return likelihood / 10.

            self.model = tf.keras.models.load_model(
                tmpdirname, custom_objects={"_logl_loss": _logl_loss})
            self.model_chi2 = tf.keras.Model(
                self.model.inputs,
                self.model.get_layer('Likelihood').output)

        # Getting gain model to get dead PMTs
        self.to_pe = straxen.get_correction_from_cmt(self.run_id,
                                                     self.config['gain_model'])
        self.dead_PMTs = np.where(self.to_pe == 0)[0]
        self.pmtbool = ~np.in1d(np.arange(0, self.config['n_tpc_pmts']),
                                self.dead_PMTs)
        self.pmtbool_top = self.pmtbool[:self.config['n_top_pmts']]
        self.pmtbool_bottom = self.pmtbool[self.config['n_top_pmts']:self.
                                           config['n_tpc_pmts']]

    def compute(self, events):

        result = np.zeros(len(events), dtype=self.dtype)
        result['time'] = events['time']
        result['endtime'] = strax.endtime(events)

        # Computing LLH values for S1s
        self.compute_s1_llhvalue(events, result)

        # Computing LLH values for S2s
        self.compute_s2_llhvalue(events, result)

        # Computing chi2 values for S2s
        self.compute_s2_neural_llhvalue(events, result)

        # Computing binomial test for s1 area fraction top
        positions = np.vstack([events['x'], events['y'], events['z']]).T
        aft_prob = self.s1_aft_map(positions)

        alt_s1_interaction_drift_time = events['s2_center_time'] - events[
            'alt_s1_center_time']
        alt_s1_interaction_z = -self.electron_drift_velocity * (
            alt_s1_interaction_drift_time - self.electron_drift_time_gate)
        alt_positions = np.vstack(
            [events['x'], events['y'], alt_s1_interaction_z]).T
        alt_aft_prob = self.s1_aft_map(alt_positions)

        # main s1 events
        mask_s1 = ~np.isnan(aft_prob)
        mask_s1 &= ~np.isnan(events['s1_area'])
        mask_s1 &= ~np.isnan(events['s1_area_fraction_top'])

        # default value is nan, it will be overwritten if the event satisfies the requirements
        result['s1_area_fraction_top_continuous_probability'][:] = np.nan
        result['s1_area_fraction_top_discrete_probability'][:] = np.nan
        result['s1_photon_fraction_top_continuous_probability'][:] = np.nan
        result['s1_photon_fraction_top_discrete_probability'][:] = np.nan

        # compute binomial test only if we have events that have valid aft prob, s1 area and s1 aft
        if np.sum(mask_s1):
            arg = aft_prob[mask_s1], events['s1_area'][mask_s1], events[
                's1_area_fraction_top'][mask_s1]
            result['s1_area_fraction_top_continuous_probability'][
                mask_s1] = s1_area_fraction_top_probability(*arg)
            result['s1_area_fraction_top_discrete_probability'][
                mask_s1] = s1_area_fraction_top_probability(*arg, 'discrete')
            arg = aft_prob[mask_s1], events['s1_area'][mask_s1] / self.config[
                'mean_pe_per_photon'], events['s1_area_fraction_top'][mask_s1]
            result['s1_photon_fraction_top_continuous_probability'][
                mask_s1] = s1_area_fraction_top_probability(*arg)
            result['s1_photon_fraction_top_discrete_probability'][
                mask_s1] = s1_area_fraction_top_probability(*arg, 'discrete')

        # alternative s1 events
        mask_alt_s1 = ~np.isnan(alt_aft_prob)
        mask_alt_s1 &= ~np.isnan(events['alt_s1_area'])
        mask_alt_s1 &= ~np.isnan(events['alt_s1_area_fraction_top'])

        # default value is nan, it will be overwritten if the event satisfies the requirements
        result['alt_s1_area_fraction_top_continuous_probability'][:] = np.nan
        result['alt_s1_area_fraction_top_discrete_probability'][:] = np.nan
        result['alt_s1_photon_fraction_top_continuous_probability'][:] = np.nan
        result['alt_s1_photon_fraction_top_discrete_probability'][:] = np.nan

        # compute binomial test only if we have events that have valid aft prob, alt s1 area and alt s1 aft
        if np.sum(mask_alt_s1):
            arg = alt_aft_prob[mask_alt_s1], events['alt_s1_area'][
                mask_alt_s1], events['alt_s1_area_fraction_top'][mask_alt_s1]
            result['alt_s1_area_fraction_top_continuous_probability'][
                mask_alt_s1] = s1_area_fraction_top_probability(*arg)
            result['alt_s1_area_fraction_top_discrete_probability'][
                mask_alt_s1] = s1_area_fraction_top_probability(
                    *arg, 'discrete')
            arg = alt_aft_prob[mask_alt_s1], events['alt_s1_area'][
                mask_alt_s1] / self.config['mean_pe_per_photon'], events[
                    'alt_s1_area_fraction_top'][mask_alt_s1]
            result['alt_s1_photon_fraction_top_continuous_probability'][
                mask_alt_s1] = s1_area_fraction_top_probability(*arg)
            result['alt_s1_photon_fraction_top_discrete_probability'][
                mask_alt_s1] = s1_area_fraction_top_probability(
                    *arg, 'discrete')

        return result

    def compute_s1_llhvalue(self, events, result):
        # Selecting S1s for pattern fit calculation
        # - must exist (index != -1)
        # - must have total area larger than the minimal one
        # - must have positive AFT
        x, y, z = events['x'], events['y'], events['z']
        cur_s1_bool = events['s1_area'] > self.config['s1_min_area_pattern_fit']
        cur_s1_bool &= events['s1_index'] != -1
        cur_s1_bool &= events['s1_area_fraction_top'] >= 0
        cur_s1_bool &= np.isfinite(x)
        cur_s1_bool &= np.isfinite(y)
        cur_s1_bool &= np.isfinite(z)
        cur_s1_bool &= (x**2 + y**2) < self.config['max_r_pattern_fit']**2

        # default value is nan, it will be overwritten if the event satisfies the requirements
        result['s1_2llh'][:] = np.nan
        result['s1_top_2llh'][:] = np.nan
        result['s1_bottom_2llh'][:] = np.nan

        # Making expectation patterns [ in PE ]
        if np.sum(cur_s1_bool):
            s1_map_effs = self.s1_pattern_map(np.array([x, y,
                                                        z]).T)[cur_s1_bool, :]
            s1_area = events['s1_area'][cur_s1_bool]
            s1_pattern = s1_area[:, None] * (
                s1_map_effs[:, self.pmtbool]) / np.sum(
                    s1_map_effs[:, self.pmtbool], axis=1)[:, None]

            s1_pattern_top = (events['s1_area_fraction_top'][cur_s1_bool] *
                              s1_area)
            s1_pattern_top = s1_pattern_top[:, None] * (
                (s1_map_effs[:, :self.config['n_top_pmts']])[:,
                                                             self.pmtbool_top])
            s1_pattern_top /= np.sum(
                (s1_map_effs[:, :self.config['n_top_pmts']])[:,
                                                             self.pmtbool_top],
                axis=1)[:, None]
            s1_pattern_bottom = (
                (1 - events['s1_area_fraction_top'][cur_s1_bool]) * s1_area)
            s1_pattern_bottom = s1_pattern_bottom[:, None] * (
                (s1_map_effs[:,
                             self.config['n_top_pmts']:])[:,
                                                          self.pmtbool_bottom])
            s1_pattern_bottom /= np.sum(
                (s1_map_effs[:,
                             self.config['n_top_pmts']:])[:,
                                                          self.pmtbool_bottom],
                axis=1)[:, None]

            # Getting pattern from data
            s1_area_per_channel_ = events['s1_area_per_channel'][
                cur_s1_bool, :]
            s1_area_per_channel = s1_area_per_channel_[:, self.pmtbool]
            s1_area_per_channel_top = (
                s1_area_per_channel_[:, :self.config['n_top_pmts']]
            )[:, self.pmtbool_top]
            s1_area_per_channel_bottom = (
                s1_area_per_channel_[:, self.config['n_top_pmts']:]
            )[:, self.pmtbool_bottom]

            # Top and bottom
            arg1 = s1_pattern / self.mean_pe_photon, s1_area_per_channel, self.mean_pe_photon
            arg2 = s1_area_per_channel / self.mean_pe_photon, s1_area_per_channel, self.mean_pe_photon
            norm_llh_val = (neg2llh_modpoisson(*arg1) -
                            neg2llh_modpoisson(*arg2))
            result['s1_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)

            # If storing is needed - store only the top and bottom arrays, not the combined one
            if self.config['store_per_channel']:
                # Storing pattern information
                store_patterns = np.zeros(
                    (s1_pattern.shape[0], self.config['n_tpc_pmts']))
                store_patterns[:, self.pmtbool] = s1_pattern
                result['s1_pattern'][cur_s1_bool] = store_patterns
                # Storing actual LLH values
                store_2LLH_ch = np.zeros(
                    (norm_llh_val.shape[0], self.config['n_tpc_pmts']))
                store_2LLH_ch[:, self.pmtbool] = norm_llh_val
                result['s1_2llh_per_channel'][cur_s1_bool] = store_2LLH_ch

            # Top
            arg1 = s1_pattern_top / self.mean_pe_photon, s1_area_per_channel_top, self.mean_pe_photon
            arg2 = s1_area_per_channel_top / self.mean_pe_photon, s1_area_per_channel_top, self.mean_pe_photon
            norm_llh_val = (neg2llh_modpoisson(*arg1) -
                            neg2llh_modpoisson(*arg2))
            result['s1_top_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)

            # Bottom
            arg1 = s1_pattern_bottom / self.mean_pe_photon, s1_area_per_channel_bottom, self.mean_pe_photon
            arg2 = s1_area_per_channel_bottom / self.mean_pe_photon, s1_area_per_channel_bottom, self.mean_pe_photon
            norm_llh_val = (neg2llh_modpoisson(*arg1) -
                            neg2llh_modpoisson(*arg2))
            result['s1_bottom_2llh'][cur_s1_bool] = np.sum(norm_llh_val,
                                                           axis=1)

    def compute_s2_llhvalue(self, events, result):
        for t_ in ['s2', 'alt_s2']:
            # Selecting S2s for pattern fit calculation
            # - must exist (index != -1)
            # - must have total area larger than the minimal one
            # - must have positive AFT
            x, y = events[t_ + '_x'], events[t_ + '_y']
            s2_mask = (events[t_ + '_area'] >
                       self.config['s2_min_area_pattern_fit'])
            s2_mask &= (events[t_ + '_area_fraction_top'] > 0)
            s2_mask &= (x**2 + y**2) < self.config['max_r_pattern_fit']**2

            # default value is nan, it will be overwritten if the event satisfies the requirements
            result[t_ + '_2llh'][:] = np.nan

            # Making expectation patterns [ in PE ]
            if np.sum(s2_mask):
                s2_map_effs = self.s2_pattern_map(np.array(
                    [x, y]).T)[s2_mask, 0:self.config['n_top_pmts']]
                s2_map_effs = s2_map_effs[:, self.pmtbool_top]
                s2_top_area = (events[t_ + '_area_fraction_top'] *
                               events[t_ + '_area'])[s2_mask]
                s2_pattern = s2_top_area[:, None] * s2_map_effs / np.sum(
                    s2_map_effs, axis=1)[:, None]

                # Getting pattern from data
                s2_top_area_per_channel = events[t_ + '_area_per_channel'][
                    s2_mask, 0:self.config['n_top_pmts']]
                s2_top_area_per_channel = s2_top_area_per_channel[:, self.
                                                                  pmtbool_top]

                # Calculating LLH, this is shifted Poisson
                # we get area expectation and we need to scale them to get
                # photon expectation
                norm_llh_val = (
                    neg2llh_modpoisson(mu=s2_pattern / self.mean_pe_photon,
                                       areas=s2_top_area_per_channel,
                                       mean_pe_photon=self.mean_pe_photon) -
                    neg2llh_modpoisson(
                        mu=s2_top_area_per_channel / self.mean_pe_photon,
                        areas=s2_top_area_per_channel,
                        mean_pe_photon=self.mean_pe_photon))
                result[t_ + '_2llh'][s2_mask] = np.sum(norm_llh_val, axis=1)

                if self.config['store_per_channel']:
                    store_patterns = np.zeros(
                        (s2_pattern.shape[0], self.config['n_top_pmts']))
                    store_patterns[:, self.pmtbool_top] = s2_pattern
                    result[t_ + '_pattern'][
                        s2_mask] = store_patterns  #:s2_pattern[s2_mask]

                    store_2LLH_ch = np.zeros(
                        (norm_llh_val.shape[0], self.config['n_top_pmts']))
                    store_2LLH_ch[:, self.pmtbool_top] = norm_llh_val
                    result[t_ + '_2llh_per_channel'][s2_mask] = store_2LLH_ch

    def compute_s2_neural_llhvalue(self, events, result):
        for t_ in ['s2', 'alt_s2']:
            x, y = events[t_ + '_x'], events[t_ + '_y']
            s2_mask = (events[t_ + '_area'] >
                       self.config['s2_min_area_pattern_fit'])
            s2_mask &= (events[t_ + '_area_fraction_top'] > 0)

            # default value is nan, it will be overwritten if the event satisfies the requirements
            result[t_ + '_neural_2llh'][:] = np.nan

            # Produce position and top pattern to feed tensorflow model, return chi2/N
            if np.sum(s2_mask):
                s2_pos = np.stack((x, y)).T[s2_mask]
                s2_pat = events[t_ + '_area_per_channel'][
                    s2_mask, 0:self.config['n_top_pmts']]
                # Output[0]: loss function, -2*log-likelihood, Output[1]: chi2
                result[t_ + '_neural_2llh'][s2_mask] = self.model_chi2.predict(
                    {
                        'xx': s2_pos,
                        'yy': s2_pat
                    })[1]

    @staticmethod
    def _infer_map_format(map_name, known_formats=('pkl', 'json', 'json.gz')):
        for fmt in known_formats:
            if map_name.endswith(fmt):
                return fmt
        raise ValueError(f'Extension of {map_name} not in {known_formats}')
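The `*_2llh` quantity computed in compute_s2_llhvalue above is built from neg2llh_modpoisson, a "shifted" (continuous) Poisson term: observed areas in PE are divided by the mean single-photon area so they can be compared to the expected photon counts from the pattern map. That helper is imported from straxen and not shown in this excerpt; the snippet below is only a minimal sketch of such a term (function name, clipping constant, and exact form are assumptions, not the straxen implementation).

import numpy as np
from scipy.special import gammaln

def neg2llh_modpoisson_sketch(mu, areas, mean_pe_photon):
    # Illustrative only -- not the straxen implementation.
    # mu:             expected photon counts per channel, shape (n_events, n_channels)
    # areas:          observed areas in PE, same shape
    # mean_pe_photon: mean single-photon area in PE
    k = areas / mean_pe_photon          # approximate observed photon counts
    mu = np.clip(mu, 1e-9, None)        # guard the logarithm against zeros
    # Continuous Poisson log-likelihood, factorial generalised via gammaln
    lnl = k * np.log(mu) - mu - gammaln(k + 1.0)
    return -2.0 * lnl

In compute_s2_llhvalue the same expression is evaluated a second time with the expectation replaced by the observed counts and subtracted, so the stored value behaves like a likelihood ratio: close to zero when the observed top-array pattern matches the expectation and growing as the two diverge.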
Code example #23
File: acqmon_processing.py Project: XENONnT/straxen
class DetectorSynchronization(strax.Plugin):
    """
    Plugin which computes the synchronization delay between TPC and
    vetos.

    Reference:
        * xenon:xenonnt:dsg:mveto:sync_monitor
    """
    __version__ = '0.0.3'
    depends_on = ('raw_records_aqmon', 'raw_records_aqmon_nv',
                  'raw_records_aux_mv')
    provides = 'detector_time_offsets'
    data_kind = 'detector_time_offsets'

    tpc_internal_delay = straxen.URLConfig(
        default={
            '0': 4917,
            '020380': 10137
        },
        type=dict,
        track=True,
        help='Internal delay between aqmon and regular TPC channels in [ns]')
    adc_threshold_nim_signal = straxen.URLConfig(
        default=500,
        type=int,
        track=True,
        help='Threshold in [adc] to search for the NIM signal')
    # This value is only valid for SR0:
    epsilon_offset = straxen.URLConfig(
        default=76,
        type=int,
        track=True,
        help='Measured missing offset for nveto in [ns]')
    sync_max_delay = strax.Config(
        default=11e3,
        help='Maximum allowed synchronization delay between detectors [ns]')
    sync_expected_min_clock_distance = straxen.URLConfig(
        default=9.9e9,
        help='Minimum expected distance between two sync-clock pulses [ns]')
    sync_expected_max_clock_distance = straxen.URLConfig(
        default=10.1e9,
        help='Maximum expected distance between two sync-clock pulses [ns]')

    def infer_dtype(self):
        dtype = []
        dtype += strax.time_fields
        dtype += [(('Time offset for nV to synchronize with TPC in [ns]',
                    'time_offset_nv'), np.int64),
                  (('Time offset for mV to synchronize with TPC in [ns]',
                    'time_offset_mv'), np.int64)]
        return dtype

    def compute(self, raw_records_aqmon, raw_records_aqmon_nv,
                raw_records_aux_mv, start, end):
        rr_tpc = raw_records_aqmon
        rr_nv = raw_records_aqmon_nv
        rr_mv = raw_records_aux_mv

        extra_offset = 0
        _mask_tpc = (rr_tpc['channel'] == AqmonChannels.GPS_SYNC)
        if not np.any(_mask_tpc):
            # For some early runs no sync signal was acquired on this channel.
            # In that case we have to add the internal DAQ delay as an extra offset later.
            _mask_tpc = (rr_tpc['channel'] == AqmonChannels.GPS_SYNC_AM)
            extra_offset = self.get_delay()

        hits_tpc = self.get_nim_edge(rr_tpc[_mask_tpc],
                                     self.config['adc_threshold_nim_signal'])
        hits_tpc['time'] += extra_offset

        _mask_mveto = (rr_mv['channel'] == AqmonChannels.GPS_SYNC_MV)
        hits_mv = self.get_nim_edge(rr_mv[_mask_mveto],
                                    self.config['adc_threshold_nim_signal'])

        _mask_nveto = rr_nv['channel'] == AqmonChannels.GPS_SYNC_NV
        hits_nv = self.get_nim_edge(rr_nv[_mask_nveto],
                                    self.config['adc_threshold_nim_signal'])
        nveto_extra_offset = 0
        if not len(hits_nv):
            # During SR0 the sync signal was not recorded properly for the
            # neutron veto, hence take the waveform itself as "hits".
            _mask_nveto &= rr_nv['record_i'] == 0
            nveto_extra_offset = self.config['epsilon_offset']
            hits_nv = rr_nv[_mask_nveto]
        hits_nv['time'] += nveto_extra_offset

        offsets_mv = self.estimate_delay(hits_tpc, hits_mv)
        offsets_nv = self.estimate_delay(hits_tpc, hits_nv)
        assert len(offsets_mv) == len(
            offsets_nv), 'Unequal number of sync signals!'

        result = np.zeros(len(offsets_mv), dtype=self.dtype)
        result['time'] = hits_tpc['time']
        result['endtime'] = strax.endtime(hits_tpc)
        result['time_offset_nv'] = offsets_nv
        result['time_offset_mv'] = offsets_mv

        return result

    def get_delay(self):
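        """Look up the aqmon-internal delay valid for the current run.

        tpc_internal_delay maps the first run_id (as a string) from which a
        delay applies; entries are expected in ascending run order, and the
        last key not exceeding self.run_id determines the delay.
        """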
        delay = 0
        for run_id, _delay in self.config['tpc_internal_delay'].items():
            if int(self.run_id) >= int(run_id):
                delay = _delay
        return delay

    @staticmethod
    def get_nim_edge(raw_records, threshold=500):
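        """Baseline the raw records and return hits above threshold;
        the hit times mark the rising edges of the NIM sync signal."""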
        records = strax.raw_to_records(raw_records)
        strax.baseline(records)
        hits = strax.find_hits(records, min_amplitude=threshold)
        return hits

    def estimate_delay(self, hits_det0, hits_det1):
        """
        Function to estimate the average offset between two hits.
        """
        err_value = -10000000000

        offsets = []
        prev_time = 0
        for ind in range(len(hits_det0)):
            offset = self.find_offset_nearest(hits_det1['time'],
                                              hits_det0['time'][ind])
            if ind:
                # Cannot compute time to prev for first event
                time_to_prev = hits_det0['time'][ind] - prev_time
            else:
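                # First hit has no predecessor; assume the nominal ~10 s
                # (10e9 ns) clock spacing so it passes the distance check below.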
                time_to_prev = 10e9

            # Additional check to avoid spurious signals
            _correct_distance_to_prev_clock = (
                time_to_prev >= self.sync_expected_min_clock_distance)
            _correct_distance_to_prev_clock &= (
                time_to_prev < self.sync_expected_max_clock_distance)
            if ((abs(offset) < self.sync_max_delay)
                    & _correct_distance_to_prev_clock):
                offsets.append(offset)
                prev_time = hits_det0['time'][ind]
            else:
                # Add err_value in case offset is not valid
                offsets.append(err_value)
                prev_time = hits_det0['time'][ind]

        return np.array(offsets)

    def find_offset_nearest(self, array, value):
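        """Return the signed distance from value to the nearest entry in
        array; an empty array yields -sync_max_delay (always rejected)."""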
        if not len(array):
            return -self.sync_max_delay
        array = np.asarray(array)
        idx = (np.abs(array - value)).argmin()
        return value - array[idx]
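As a toy illustration of the matching logic that estimate_delay builds on (hypothetical numbers, not real run data): find_offset_nearest returns the signed distance from each TPC sync hit to the nearest veto sync hit, so a constant detector lag shows up as a constant offset, while the 9.9-10.1 s clock-distance window rejects pulses that do not follow the expected ~10 s GPS cadence.

import numpy as np

def find_offset_nearest(array, value, sync_max_delay=11e3):
    # Same logic as DetectorSynchronization.find_offset_nearest above.
    if not len(array):
        return -sync_max_delay
    array = np.asarray(array)
    idx = np.abs(array - value).argmin()
    return value - array[idx]

# GPS sync pulses every ~10 s; the neutron veto lags the TPC by 2.5 us here.
tpc_times = np.array([0, 10_000_000_000, 20_000_000_000])   # [ns]
nv_times = tpc_times + 2_500                                 # [ns]

offsets = [int(find_offset_nearest(nv_times, t)) for t in tpc_times]
print(offsets)   # [-2500, -2500, -2500] -> constant lag recovered

estimate_delay then keeps such an offset only if it is smaller than sync_max_delay and the previously accepted pulse lies the expected clock distance away; otherwise the sentinel err_value is stored for that pulse.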