Example #1
class ReconstructedPosition(StrictModel):
    """Reconstructed position

    Each reconstruction algorithm creates one of these.
    """
    x = float('nan')  #: x position (cm)
    y = float('nan')  #: y position (cm)

    #: For 3d-position reconstruction algorithms, the z-position (cm)
    #: This is NOT related to drift time, which is an interaction-level quantity!
    z = float('nan')

    #: Goodness-of-fit of hitpattern to position (provided by PosRecTopPatternFit)
    #: For PosRecThreedPatternFit, the 3d position goodness-of-fit.
    goodness_of_fit = float('nan')

    #: Number of degrees of freedom used in goodness-of-fit calculation
    ndf = float('nan')

    #: Name of algorithm which provided this position
    algorithm = 'none'

    #: Confidence levels
    # error_matrix = np.array([], dtype=np.float64)
    confidence_tuples = ListField(ConfidenceTuple)

    # For convenience: cylindrical coordinates
    # Must be properties so InterpolatingDetectorMap can transparently use
    # cylindrical coordinates
    @property
    def r(self):
        """Radial position"""
        return np.sqrt(self.x ** 2 + self.y ** 2)

    #: phi position, i.e. angle w.r.t. the positive x-axis in the xy plane (radians)
    @property
    def phi(self):
        """Angular position (radians, origin at positive x-axis)"""
        return np.arctan2(self.y, self.x)
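A minimal usage sketch (not part of the pax source; it assumes StrictModel accepts field values as keyword arguments, as the Event constructor in the next example suggests). The cylindrical coordinates are computed on the fly from x and y:

pos = ReconstructedPosition(x=3.0, y=4.0, algorithm='PosRecTopPatternFit')
print(pos.r)    # 5.0 -> sqrt(x**2 + y**2), in cm
print(pos.phi)  # ~0.927 rad, angle w.r.t. the positive x-axis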
Example #2
class Event(StrictModel):
    """Object holding high-level information about a triggered event,
    and list of objects (such as Peak, Hit and Pulse) containing lower-level information.
    """
    #: The name of the dataset this event belongs to
    dataset_name = 'Unknown'

    #: A nonnegative integer that uniquely identifies the event within the dataset.
    event_number = 0

    #: Internal number used for multiprocessing, no physical meaning.
    block_id = -1

    #: Total number of channels in the event (whether or not they see anything).
    #: Has to be the same as n_channels in config, provided here for deserialization ease.
    n_channels = INT_NAN

    #: Integer start time of the event in nanoseconds since January 1, 1970.
    #: This is the time that the first sample starts.
    #: NB: don't do floating-point arithmetic on 64-bit integers such as these,
    #: float rounding results in loss of precision.
    start_time = long(0)

    #: Integer stop time of the event in nanoseconds since January 1, 1970.
    #: This is the time that the last sample ends.
    #: NB: don't do floating-point arithmetic on 64-bit integers such as these,
    #: float rounding results in loss of precision.
    stop_time = long(0)

    #: Time duration of a sample (in ns).
    #: For V1724 digitizers (e.g. XENON), this is 10 nanoseconds.
    #: This is also in config, but we need it here too, to convert between event duration and length in samples
    #: Must be an int for same reason as start_time and stop_time
    #: DO NOT set 10 ns as the default, otherwise there is no way to check whether it was given to the constructor!
    sample_duration = 0

    #: A list of :class:`pax.datastructure.Interaction` objects.
    interactions = ListField(Interaction)

    #: A list of :class:`pax.datastructure.Peak` objects.
    peaks = ListField(Peak)

    #: Array of trigger signals contained in the event
    trigger_signals = np.array([], dtype=TriggerSignal.get_dtype())

    #: Array of all hits found in event
    #: These will get grouped into peaks during clustering. New hits will be added when peaks are split.
    #: NEVER rely upon the order of hits in this field! It depends on lunar phase and ambient pressure.
    #: This is usually emptied before output (but not in LED mode)
    all_hits = np.array([], dtype=Hit.get_dtype())

    #: A list of :class:`pax.datastructure.SumWaveform` objects.
    sum_waveforms = ListField(SumWaveform)

    #: A list of :class:`pax.datastructure.Pulse` objects.
    #: A pulse holds a stream of samples in one channel provided by the digitizer.
    #: To save space, only the pulses contributing hits to S1s are kept in the output (but not in LED mode)
    #: The order of this field cannot be changed after the hitfinder, since hits have a found_in_pulse field
    #: referring to the index of a pulse in this field.
    pulses = ListField(Pulse)

    #: Number of pulses per channel
    n_pulses_per_channel = np.array([], dtype=np.int16)

    #: Total number of pulses
    n_pulses = 0

    #: Number of noise pulses (pulses without any hits found) per channel
    noise_pulses_in = np.array([], dtype=np.int16)

    #: Number of lone hits per channel BEFORE suspicious channel hit rejection.
    #: lone_hit is a peak type (sorry, confusing...) indicating just one contributing channel.
    #: Use this to check / calibrate the suspicious channel hit rejection.
    lone_hits_per_channel_before = np.array([], dtype=np.int16)

    #: Number of lone hits per channel AFTER suspicious channel hit rejection.
    #: lone_hit is a peak type (sorry, confusing...) indicating just one contributing channel
    lone_hits_per_channel = np.array([], dtype=np.int16)

    #: Was channel flagged as suspicious?
    is_channel_suspicious = np.array([], dtype=np.bool)

    #: Number of hits rejected per channel in the suspicious channel algorithm
    n_hits_rejected = np.array([], dtype=np.int16)

    def __init__(self, n_channels, start_time, **kwargs):

        # Start time is mandatory, so it is not in kwargs
        kwargs['start_time'] = start_time
        kwargs['n_channels'] = n_channels

        # Model's init must be called first, else we can't store attributes
        # This will store all of the kwargs as attrs
        # We don't pass length, it's not an attribute that can be set
        StrictModel.__init__(self, **{k: v for k, v in kwargs.items() if k != 'length'})

        # Cheat to init stop_time from length and duration
        if 'length' in kwargs and self.sample_duration and not self.stop_time:
            self.stop_time = int(self.start_time + kwargs['length'] * self.sample_duration)

        if not self.stop_time or not self.sample_duration:
            raise ValueError("Cannot initialize an event with an unknown length: " +
                             "pass sample_duration and either stop_time or length")

        if self.duration() <= 0:
            raise ValueError("Nonpositive event duration %s!" % self.duration())

        # Initialize numpy arrays -- need to have n_channels and self.length
        self.n_pulses_per_channel = np.zeros(n_channels, dtype=np.int16)
        self.noise_pulses_in = np.zeros(n_channels, dtype=np.int16)
        self.n_hits_rejected = np.zeros(n_channels, dtype=np.int16)
        self.is_channel_suspicious = np.zeros(n_channels, dtype=np.bool)
        self.lone_hits_per_channel_before = np.zeros(n_channels, dtype=np.int16)
        self.lone_hits_per_channel = np.zeros(n_channels, dtype=np.int16)

    @classmethod
    def empty_event(cls):
        """Returns an empty example event: for testing purposes only!!
        """
        return Event(n_channels=1, start_time=10, length=1, sample_duration=int(10 * units.ns))

    def duration(self):
        """Duration of event window in units of ns
        """
        return self.stop_time - self.start_time

    def get_sum_waveform_names(self):
        """Get list of the names of sum waveform objects
        Deprecated -- for Xerawdp matching only
        """
        return [sw.name for sw in self.sum_waveforms]

    def get_sum_waveform(self, name):
        """Get sum waveform object by name
        Deprecated -- for Xerawdp matching only
        """
        for sw in self.sum_waveforms:
            if sw.name == name:
                return sw

        raise RuntimeError("SumWaveform %s not found" % name)

    def length(self):
        """Number of samples in the event
        """
        return int(self.duration() / self.sample_duration)

    def s1s(self, detector='tpc', sort_key=('tight_coincidence', 'area'), reverse=True):  # noqa
        """List of S1 (scintillation) signals in this event
        In the ROOT class output, this returns a list of integer indices in event.peaks
        Inside pax, returns a list of :class:`pax.datastructure.Peak` objects
          whose type is 's1', and
          who are in the detector specified by the 'detector' argument (unless detector='all')
        The returned list is sorted DESCENDING (i.e. reversed!) by the key sort_key (default: tight_coincidence, then area)
        unless you pass reverse=False, then it is ascending.
        """
        return self.get_peaks_by_type('s1', sort_key=sort_key, reverse=reverse, detector=detector)

    def S1s(self, *args, **kwargs):
        """See s1s"""
        return self.s1s(*args, **kwargs)

    def s2s(self, detector='tpc', sort_key='area', reverse=True):  # noqa
        """List of S2 (ionization) signals in this event
        In the ROOT class output, this returns a list of integer indices in event.peaks.
        Inside pax, returns a list of :class:`pax.datastructure.Peak` objects
          whose type is 's2', and
          who are in the detector specified by the 'detector' argument (unless detector='all')
        The returned list is sorted DESCENDING (i.e. reversed!) by the key sort_key (default area)
        unless you pass reverse=False, then it is ascending.
        """
        return self.get_peaks_by_type(desired_type='s2', sort_key=sort_key, reverse=reverse, detector=detector)

    def S2s(self, *args, **kwargs):
        """See s2s"""
        return self.s2s(*args, **kwargs)

    @property
    def main_s1(self):
        """Return the S1 of the primary interaction, or if that does not exist, the largest S1 in the tpc.
        Returns None if neither exist"""
        if self.interactions:
            return self.peaks[self.interactions[0].s1]
        else:
            try:
                return self.s1s()[0]
            except IndexError:
                return None

    @property
    def main_s2(self):
        """Return the S2 of the primary interaction, or if that does not exist, the largest S2 in the tpc.
        Returns None if neither exist"""
        if self.interactions:
            return self.peaks[self.interactions[0].s2]
        else:
            try:
                return self.s2s()[0]
            except IndexError:
                return None

    def get_peaks_by_type(self, desired_type='all', detector='tpc', sort_key='area', reverse=True):
        """Helper function for retrieving only certain types of peaks
        Returns a list of :class:`pax.datastructure.Peak` objects
          whose type is desired_type, and
          who are in the detector specified by the 'detector' argument (unless detector='all')
        The returned list is sorted DESCENDING (i.e. reversed!) by the key sort_key (default area)
        unless you pass reverse=False, then it is ascending (normal sort order).
        """
        # Extract only peaks of a certain type
        peaks = []
        for peak in self.peaks:
            if detector != 'all':
                if peak.detector != detector:
                    continue
            if desired_type != 'all' and peak.type.lower() != desired_type:
                continue
            peaks.append(peak)

        # Sort the peaks by your sort key
        if isinstance(sort_key, (str, bytes)):
            sort_key = [sort_key]
        peaks = sorted(peaks,
                       key=operator.attrgetter(*sort_key),
                       reverse=reverse)

        return peaks
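A usage sketch for Event (illustrative values only; peaks and interactions would normally be filled by the pax processing chain). It shows the length/duration relation enforced by the constructor and the sorted peak accessors:

event = Event(n_channels=10, start_time=0, length=1000, sample_duration=10)
event.duration()   # 10000 ns: stop_time was derived as start_time + length * sample_duration
event.length()     # 1000 samples

# s2s() returns tpc peaks of type 's2', sorted descending by area;
# pass reverse=False for ascending order, or detector='all' to include all detectors.
s2_list = event.s2s()
largest_s2 = s2_list[0] if s2_list else None

# main_s1 / main_s2 prefer the primary interaction's peaks and otherwise
# fall back to the largest S1 / S2 in the tpc (None if neither exists).
s1 = event.main_s1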
Example #3
class Peak(StrictModel):
    """A group of nearby hits across one or more channels.
    Peaks will be classified as e.g. s1, s2, lone_hit, unknown, coincidence
    """
    #: Type of peak (e.g., 's1', 's2', ...):
    #: NB 'lone_hit' indicates one or more hits in a single channel. Use lone_hit_channel to retrieve that channel.
    type = 'unknown'

    #: Detector in which the peak was found, e.g. tpc or veto
    detector = 'none'

    ##
    #  Hit, area, and saturation data
    ##

    #: The hits that make up this peak. To save space, we usually only store the hits for s1s in the root file.
    #: Do not rely on the order of hits in this field!!
    #: For the root output, this gets converted back to a list of Hit classes (then to a vector of c++ Hit objects)
    hits = np.array([], dtype=Hit.get_dtype())

    #: Total area of all hits, per PMT (pe).
    area_per_channel = np.array([], dtype='float64')

    #: Contribution to tight coincidence (1 or 0) for each channel
    coincidence_per_channel = np.array([], dtype=np.int16)

    #: Total area of all hits across all PMTs (pes).
    #: In XerawdpImitation mode, the rightmost sample is not included in the area integral.
    area = 0.0

    #: Fraction of area in the top PMTs
    area_fraction_top = 0.0

    #: Multiplicative correction on S2 due to LCE variations
    s2_spatial_correction = 1.0

    #: Multiplicative correction on S2 top due to LCE variations
    s2_top_spatial_correction = 1.0

    #: Multiplicative correction on S2 bottom due to LCE variations
    s2_bottom_spatial_correction = 1.0

    #: Multiplicative correction on S2 due to saturation
    s2_saturation_correction = 1.0

    #: Number of hits in the peak, per channel (that is, it's an array with index = channel number)
    hits_per_channel = np.array([], dtype=np.int16)

    #: Number of channels which contribute to the peak
    n_contributing_channels = 0

    #: Number of channels in the top array contributing to the peak
    n_contributing_channels_top = 0

    #: Total number of hits in the peak
    n_hits = 0

    #: Fraction of hits in the top array
    hits_fraction_top = 0.0

    #: Number of samples with ADC saturation in this peak, per channel
    n_saturated_per_channel = np.array([], dtype=np.int16)

    @property
    def is_channel_saturated(self):
        """Boolean array of n_channels which indicates if there was ADC saturation in any hit
        in that channel during the peak"""
        return self.n_saturated_per_channel > 0

    @property
    def saturated_channels(self):
        """List of channels which contribute hits with saturated channels in this peak"""
        return np.where(self.n_saturated_per_channel > 0)[0]

    #: Total number of samples with ADC saturation across all channels in this peak
    n_saturated_samples = 0

    #: Total number of channels in the peak which have at least one saturated hit
    n_saturated_channels = 0

    #: If the peak is a lone_hit: the channel the hit is / hits are in
    lone_hit_channel = INT_NAN

    #: Area of the largest hit in the peak
    largest_hit_area = float('nan')

    #: Channel of the largest hit in the peak
    largest_hit_channel = INT_NAN

    @property
    def does_channel_contribute(self):
        """Boolean array of n_channels which tells you if the channel contributes any hit"""
        return self.area_per_channel > 0

    @property
    def contributing_channels(self):
        """List of channels which contribute one or more hits to this peak"""
        return np.where(self.does_channel_contribute)[0]

    #: Number of channels that have a hit maximum within a short (configurable) window around the peak's sum
    #: waveform maximum.
    tight_coincidence = INT_NAN
    tight_coincidence_thresholds = np.array([], dtype=np.int16)  # various tight coincidence levels

    ##
    # Time distribution information
    ##

    left = 0                 #: Index/sample of left bound (inclusive) in event.
    right = 0                #: Index/sample of right bound (INCLUSIVE) in event.

    #: Weighted (by hit area) mean of hit times (since event start) [ns]
    hit_time_mean = 0.0

    #: Weighted (by hit area) std of hit times [ns]
    hit_time_std = 0.0

    #: Central range of peak (hit-only) sum waveform which includes a given decile (0-10) of area [ns].
    #: e.g. range_area_decile[5] = range of 50% area = distance (in time) between point
    #: of 25% area and 75% area (with boundary samples added fractionally).
    #: First element (0) is always zero, last element (10) is the full range of the peak.
    range_area_decile = np.zeros(11, dtype=np.float)

    #: Time (ns) from the area decile point to the area midpoint.
    #: If you want to know the time until some other point (say the sum waveform maximum),
    #: just add the difference between that point and the area midpoint.
    area_decile_from_midpoint = np.zeros(11, dtype=np.float)

    @property
    def range_50p_area(self):
        return self.range_area_decile[5]

    @property
    def range_90p_area(self):
        return self.range_area_decile[9]

    @property
    def full_range(self):
        return self.range_area_decile[10]

    #: Time at which the peak reaches 50% of its area (with the central sample considered fractionally)
    area_midpoint = 0.0

    ##
    # Spatial pattern information
    ##

    #: List of reconstructed positions (instances of :class:`pax.datastructure.ReconstructedPosition`)
    reconstructed_positions = ListField(ReconstructedPosition)

    def get_reconstructed_position_from_algorithm(self, algorithm):
        """Return reconstructed position found by algorithm, or None if the peak doesn't have one"""
        for rp in self.reconstructed_positions:
            if rp.algorithm == algorithm:
                return rp
        return None

    def get_position_from_preferred_algorithm(self, algorithm_list):
        """Return reconstructed position by the first algorithm in list,
        unless it doesn't exist or is a nan position, then moves on to further algorithms."""
        for algo in algorithm_list:
            rp = self.get_reconstructed_position_from_algorithm(algo)
            if rp is not None and not np.isnan(rp.x):
                return rp
        else:
            raise ValueError("Could not find any position from the chosen algorithms: %s" % algorithm_list)

    #: Weighted-average distance of top array hits from weighted mean hitpattern center on top array (cm)
    top_hitpattern_spread = float('nan')

    #: Weighted-average distance of bottom array hits from weighted mean hitpattern center on bottom array (cm)
    bottom_hitpattern_spread = float('nan')

    ##
    # Signal / noise info
    ##

    #: Weighted (by area) mean hit amplitude / noise level in that hit's channel
    mean_amplitude_to_noise = 0.0

    #: Number of hitless (noise) pulses in the event that overlap, at least partially, in time with this peak.
    #: Includes channels from other detectors (since veto and tpc cables could influence each other)
    n_noise_pulses = 0

    ##
    # Sum-waveform properties
    ##

    #: Cut-out of the peak's sum waveform in pe/bin
    #: The peak's center of gravity is always in the center of the array.
    sum_waveform = np.array([], dtype=np.float32)

    #: For tpc peaks, the peak's sum waveform in the top array only. Aligned with the sum waveform.
    sum_waveform_top = np.array([], dtype=np.float32)

    #: Index/sample in the event's sum waveform at which this peak has its maximum.
    index_of_maximum = 0

    #: Time since start of the event at which the peak's sum waveform has its center of gravity [ns].
    center_time = 0.0

    #: Height of sum waveform (in pe/bin)
    height = 0.0

    ##
    # Clustering record
    ##

    #: Best goodness of split observed inside the peak
    interior_split_goodness = float('nan')

    #: Area fraction of the smallest of the two halves considered in the best split inside the peak
    #: (i.e. the one corresponding to interior_split_goodness)
    interior_split_fraction = float('nan')

    #: Goodness of split of last split that was used to construct this peak (if split did occur).
    birthing_split_goodness = float('nan')

    #: Area of this peak / area of parent peak it was split from (if split did occur)
    birthing_split_fraction = float('nan')
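A usage sketch for the derived Peak properties, under the same assumption as above that StrictModel accepts field values as keyword arguments; the field values and algorithm name are illustrative, not real data:

peak = Peak(type='s2', detector='tpc', area=100.0,
            range_area_decile=np.linspace(0.0, 500.0, 11),
            n_saturated_per_channel=np.array([0, 2, 0], dtype=np.int16))
peak.range_50p_area        # 250.0: range_area_decile[5], the width containing 50% of the area (ns)
peak.saturated_channels    # array([1]): channels with at least one saturated sample

# Returns None here, since no reconstructed positions were added to this peak.
rp = peak.get_reconstructed_position_from_algorithm('PosRecTopPatternFit')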