def test_overwrite():
    @strax.takes_config(
        strax.Option('option', default=False),
    )
    class BasePlugin(strax.Plugin):
        """The plugin that we will be sub-classing"""
        provides = 'base'
        dtype = test_dtype
        depends_on = tuple()

        def compute(self, something):
            return np.ones(len(something), dtype=self.dtype)

    st = strax.Context(storage=[])
    st.register(BasePlugin)

    # Keep an account of this lineage hash such that we can compare it later
    lineage_base = st.key_for('0', 'base').lineage_hash

    try:
        @strax.takes_config(
            strax.Option('option', default=True),
        )
        class CrashPlugin(BasePlugin):
            """
            Subclassing with a different default for an existing option
            causes a RuntimeError
            """
            pass

        st.register(CrashPlugin)
    except RuntimeError:
        print('Ran into a RuntimeError because we tried specifying an '
              'option twice. This is exactly what we want!')

    @strax.takes_config(
        strax.Option('option', default=True, overwrite=True),
    )
    class OverWritePlugin(BasePlugin):
        """Only overwrite the option, the rest is the same"""
        pass

    st.register(OverWritePlugin)

    assert st.key_for('0', 'base').lineage_hash != lineage_base, \
        'Lineage did not change'
    p = st.get_single_plugin('0', 'base')
    assert p.__class__.__name__ == 'OverWritePlugin'
    assert p.config['option'] is True, f'Option was not overwritten: {p.config}'
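# A minimal standalone sketch of the pattern exercised above (class and
# option names here are illustrative, not from the test): redefining an
# option inherited from a parent plugin requires overwrite=True, otherwise
# strax raises the RuntimeError the try/except above is catching.
import strax

@strax.takes_config(strax.Option('threshold', default=10))
class Parent(strax.Plugin):
    """Hypothetical parent plugin declaring one option."""

@strax.takes_config(strax.Option('threshold', default=20, overwrite=True))
class Child(Parent):
    """Same plugin, with a different default for 'threshold'."""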
def make_MCreader(self, name: str, path: str, rate: float):
    self.names.append(name)

    @strax.takes_config(
        strax.Option(f'input_dir_{name}', type=str, track=True,
                     default=path,
                     help="Directory where readers put data"),
        strax.Option(f'rate_{name}', type=float, track=True,
                     default=rate,
                     help="Rate [GHz] of this source"),
    )
    class newMCreader(MCreader):
        sourcename = name
        provides = [f'photons_{sourcename}', f'nest_hits_{sourcename}']
        data_kind = {k: k for k in provides}

    newMCreader.__name__ = f'MCreader_{name}'
    self.source_plugins[name] = newMCreader
    return newMCreader
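# Hypothetical usage of the factory above (the owning simulation manager
# 'sim', its context attribute, and the path are assumptions made for
# illustration):
#
#     reader_cls = sim.make_MCreader('tpc', path='/data/mc/tpc', rate=0.5)
#     sim.context.register(reader_cls)
#
# Each call mints a distinctly named subclass with its own
# input_dir_<name> and rate_<name> options, so several sources can
# coexist in one strax context.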
class Records(strax.Plugin):
    provides = 'records'
    depends_on = tuple()
    dtype = strax.record_dtype()

    def iter(self, *args, **kwargs):
        for t in range(n_chunks):
            r = np.zeros(recs_per_chunk, self.dtype)
            r['time'] = t
            r['length'] = 1
            r['dt'] = 1
            r['channel'] = np.arange(len(r))
            yield r


@strax.takes_config(strax.Option('some_option', default=0))
class Peaks(strax.Plugin):
    provides = 'peaks'
    depends_on = ('records',)
    dtype = strax.peak_dtype()

    def compute(self, records):
        p = np.zeros(len(records), self.dtype)
        p['time'] = records['time']
        return p


recs_per_chunk = 10
n_chunks = 10
run_id = '0'
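# A short sketch of running these toy plugins end to end, following the
# usual strax test pattern (get_array is the standard strax.Context
# accessor; the assertion value follows from the constants above):
import strax

st = strax.Context(storage=[], register=[Records, Peaks])
peaks = st.get_array(run_id, 'peaks')
assert len(peaks) == recs_per_chunk * n_chunks  # 10 chunks x 10 records each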
import strax
import straxen
import numpy as np


@strax.takes_config(
    strax.Option('peak_split_min_area', default=40.,
                 help='Minimum area to evaluate natural breaks criterion. '
                      'Smaller peaks are not split.'))
class MyPlugin(strax.Plugin):
    """Example plugin providing 'fancy_peaks' data."""
    provides = ('fancy_peaks',)
    data_kind = 'fancy_peaks'
    depends_on = 'peaklets'

    def infer_dtype(self):
        dtype = strax.time_fields + [
            (('Electron life time in TPC', 'elif'), np.float32)]
        return dtype

    def compute(self, peaklets, start, end):
import strax
import straxen
from straxen.get_corrections import is_cmt_option
from straxen.plugins.veto_pulse_processing import MV_PREAMBLE, NV_HIT_OPTIONS

export, __all__ = strax.exporter()


@export
@strax.takes_config(
    *NV_HIT_OPTIONS,
    strax.Option(
        'min_split_nv',
        default=0.063, track=True, infer_type=False,
        help='Minimum height difference (pe/sample) between local minimum '
             'and maximum for a pulse to be split.'),
    strax.Option(
        'min_split_ratio_nv',
        default=0.75, track=True, infer_type=False,
        help='Min ratio between local maximum and minimum to split pulse '
             '(zero to switch this off).'),
    strax.Option(
        'entropy_template_nv',
        default='flat', track=True,
import strax
import numpy as np
from nEXO_strax.common import pax_file, get_resource, get_elife, first_sr1_run
from nEXO_strax.itp_map import InterpolatingMap

export, __all__ = strax.exporter()


@export
@strax.takes_config(
    strax.Option('trigger_min_area', default=100,
                 help='Peaks must have more area (PE) than this to '
                      'cause events'),
    strax.Option('trigger_max_competing', default=7,
                 help='Peaks must have FEWER nearby larger or slightly smaller '
                      'peaks to cause events'),
    strax.Option('left_event_extension', default=int(1e6),
                 help='Extend events this many ns to the left from each '
                      'triggering peak'),
    strax.Option('right_event_extension', default=int(1e6),
                 help='Extend events this many ns to the right from each '
                      'triggering peak'),
)
class Events(strax.OverlapWindowPlugin):
    depends_on = ['peak_basics', 'n_competing']
    data_kind = 'events'
import numpy as np
import strax
from immutabledict import immutabledict
from strax.processing.general import _touching_windows
import straxen
from .pulse_processing import HITFINDER_OPTIONS, HITFINDER_OPTIONS_he, HE_PREAMBLE
from straxen.get_corrections import is_cmt_option

export, __all__ = strax.exporter()

FAKE_MERGED_S2_TYPE = -42


@export
@strax.takes_config(
    strax.Option('peaklet_gap_threshold', default=700, infer_type=False,
                 help="No hits for this many ns triggers a new peak"),
    strax.Option('peak_left_extension', default=30, infer_type=False,
                 help="Include this many ns left of hits in peaks"),
    strax.Option('peak_right_extension', default=200, infer_type=False,
                 help="Include this many ns right of hits in peaks"),
    strax.Option(
        'peak_min_pmts', default=2, infer_type=False,
        help="Minimum number of contributing PMTs needed to define a peak"),
    strax.Option(
            'mode': self.subrun_modes[int(run_id) % 2],
            'source': self.subrun_source
        }
        with open(self.context.storage[0]._run_meta_path(str(run_id)), 'w') as fp:
            json.dump(run_doc, fp, sort_keys=True, indent=4,
                      default=json_util.default)


@strax.takes_config(
    strax.Option(
        name='some_additional_value',
        default=42,
        help="Some additional value for merger",
    ))
class RecordsExtension(strax.Plugin):
    depends_on = 'records'
    provides = 'records_extension'
    dtype = strax.time_dt_fields + [
        (('Some additional field', 'additional_field'), np.int16)]

    def compute(self, records):
        res = np.zeros(len(records), self.dtype)
        res['time'] = records['time']
        res['length'] = records['length']
        res['dt'] = records['dt']
import numpy as np
import numba
from enum import IntEnum
from scipy.stats import halfcauchy

import strax
import straxen
from straxen.common import pax_file, get_resource, first_sr1_run

export, __all__ = strax.exporter()

from .pulse_processing import HE_PREAMBLE


@export
@strax.takes_config(
    strax.Option('n_top_pmts', default=straxen.n_top_pmts, infer_type=False,
                 help="Number of top PMTs"),
    strax.Option('check_peak_sum_area_rtol', default=None, track=False,
                 infer_type=False,
                 help="Check if the sum area and the sum of area per "
                      "channel are the same. If None, don't do the "
                      "check. To perform the check, set to the desired "
                      "rtol value, e.g. '1e-4' (see np.isclose)."),
)
class PeakBasics(strax.Plugin):
    """
    Compute the basic peak-properties, thereby dropping structured arrays.
    NB: This plugin can therefore be loaded as a pandas DataFrame.
import tempfile

import strax
import straxen
import numpy as np
from immutabledict import immutabledict
from strax.testutils import run_id, recs_per_chunk

# Number of chunks for the dummy raw records we are writing here
N_CHUNKS = 2

##
# Tools
##


@strax.takes_config(strax.Option('secret_time_offset', default=0, track=False))
class DummyRawRecords(strax.Plugin):
    """
    Provide dummy raw records for the major raw_record types
    """
    provides = ('raw_records', 'raw_records_he', 'raw_records_nv',
                'raw_records_aqmon')
    parallel = 'process'
    depends_on = tuple()
    data_kind = immutabledict(zip(provides, provides))
    rechunk_on_save = False
    dtype = {p: strax.raw_record_dtype() for p in provides}

    def source_finished(self):
        return True
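# Sketch of how DummyRawRecords is typically exercised in a test (the
# temporary-directory storage is an assumption made for illustration):
#
#     with tempfile.TemporaryDirectory() as temp_dir:
#         st = strax.Context(storage=strax.DataDirectory(temp_dir),
#                            register=[DummyRawRecords])
#         rr = st.get_array(run_id, 'raw_records')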
import tempfile
import shutil
import os
import os.path as osp
import glob

import pytest
import numpy as np

import strax


@strax.takes_config(strax.Option('crash', default=False))
class Records(strax.Plugin):
    provides = 'records'
    depends_on = tuple()
    dtype = strax.record_dtype()

    def iter(self, *args, **kwargs):
        if self.config['crash']:
            raise SomeCrash("CRASH!!!!")
        for t in range(n_chunks):
            r = np.zeros(recs_per_chunk, self.dtype)
            r['time'] = t
            r['length'] = 1
            r['dt'] = 1
            r['channel'] = np.arange(len(r))
            yield r


class SomeCrash(Exception):
    pass
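# Sketch of the failure path this fixture enables (pytest is imported
# above; the run and target names follow the plugin definition):
#
#     st = strax.Context(storage=[], register=Records,
#                        config=dict(crash=True))
#     with pytest.raises(SomeCrash):
#         st.make('0', 'records')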
def _convert_to_interval(time_stamps, allow_zero_length):
    time_stamps = np.sort(time_stamps)
    intervals = np.zeros(len(time_stamps) // 2, strax.time_dt_fields)
    intervals['dt'] = 1
    intervals['time'] = time_stamps[::2]
    intervals['length'] = time_stamps[1::2] - time_stamps[::2]
    if not allow_zero_length:
        intervals = intervals[intervals['length'] > 0]
    return np.unique(intervals)


@strax.takes_config(
    strax.Option('secret_time_offset', default=0, track=False),
    strax.Option('recs_per_chunk', default=10, track=False),
    strax.Option(
        'n_chunks', default=2, track=False,
        help='Number of chunks for the dummy raw records we are writing here'),
    strax.Option('channel_map', track=False, type=immutabledict,
                 help="frozendict mapping subdetector to (min, max) "
                      "channel number."))
class DummyRawRecords(strax.Plugin):
    """
    Provide dummy raw records for the major raw_record types
    """
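# Worked example for _convert_to_interval (values are illustrative):
# consecutive pairs of sorted time stamps become (start, length) intervals,
# and zero-length pairs are dropped unless allow_zero_length is set.
#
#     stamps = np.array([0, 10, 10, 10, 20, 35])
#     _convert_to_interval(stamps, allow_zero_length=False)
#     # -> intervals at time 0 (length 10) and time 20 (length 15);
#     #    the zero-length pair (10, 10) is removed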
import string
import typing as ty
import warnings

import numexpr
import numpy as np
import pandas as pd
from tqdm import tqdm

import strax

export, __all__ = strax.exporter()


@strax.takes_config(
    strax.Option(name='storage_converter', default=False,
                 help='If True, save data that is loaded from one frontend '
                      'through all willing other storage frontends.'),
    strax.Option(name='fuzzy_for', default=tuple(),
                 help='Tuple of plugin names for which no checks for version, '
                      'providing plugin, and config will be performed when '
                      'looking for data.'),
    strax.Option(name='fuzzy_for_options', default=tuple(),
                 help='Tuple of config options for which no checks will be '
                      'performed when looking for data.'),
    strax.Option(name='allow_incomplete', default=False,
                 help="Allow loading of incompletely written data, if the "
                      "storage systems support it"),
    strax.Option(name='allow_rechunk',
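# These options configure the strax.Context itself. A minimal sketch of
# setting them after construction (option names taken from the
# declarations above; set_context_config is the standard strax API):
import strax

st = strax.Context(storage=[])
st.set_context_config(dict(fuzzy_for=('peak_basics',),
                           allow_incomplete=True))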
import strax
import numpy as np
import numba

import straxen

export, __all__ = strax.exporter()

MV_PREAMBLE = 'Muon-Veto Plugin: Same as the corresponding nVETO-Plugin.\n'


@export
@strax.takes_config(
    strax.Option('save_outside_hits_nv', default=(3, 15), track=True,
                 help='Save (left, right) samples besides hits; cut the rest'),
    strax.Option(
        'baseline_samples_nv',
        default=('baseline_samples_nv', 'ONLINE', True), track=True,
        help='Number of samples to use at the start of the pulse to determine '
             'the baseline'),
    strax.Option('hit_min_amplitude_nv', default=20, track=True,
                 help='Minimum hit amplitude in ADC counts above baseline. '
                      'Specify as a tuple of length n_nveto_pmts, or a number.'),
    strax.Option(
        'min_samples_alt_baseline_nv', default=None, track=True,
import strax
import straxen
import numpy as np
import numba
import pandas as pd
import typing as ty
from immutabledict import immutabledict

export, __all__ = strax.exporter()


@strax.takes_config(
    strax.Option('event_left_extension_nv', default=0,
                 help="Extends events this many ns to the left"),
    strax.Option('event_resolving_time_nv', default=300,
                 help="Resolving time for fixed window coincidence [ns]."),
    strax.Option(
        'event_min_hits_nv', default=3,
        help="Minimum number of fully confined hitlets to define an event."),
    strax.Option('gain_model_nv',
                 help='PMT gain model. Specify as (model_type, model_config)'),
    strax.Option('channel_map', track=False, type=immutabledict,
                 help="immutabledict mapping subdetector to (min, max) "
                      "channel number."),
import numpy as np
from scipy.ndimage import convolve1d
from immutabledict import immutabledict

import strax
import straxen
from straxen.get_corrections import is_cmt_option

export, __all__ = strax.exporter()


@export
@strax.takes_config(
    strax.Option('coincidence_level_recorder_nv', type=int, default=3,
                 help="Required coincidence level."),
    strax.Option('pre_trigger_time_nv', type=int, default=150,
                 help="Pretrigger time before coincidence window in ns."),
    strax.Option('resolving_time_recorder_nv', type=int, default=600,
                 help="Resolving time of the coincidence in ns."),
    strax.Option('baseline_samples_nv', infer_type=False,
                 default=('baseline_samples_nv', 'ONLINE', True), track=True,
                 help="Number of samples used in baseline rms calculation"),
    strax.Option(
import numba
import numpy as np

import strax
import straxen
from .pulse_processing import HITFINDER_OPTIONS, HITFINDER_OPTIONS_he, HE_PREAMBLE
from strax.processing.general import _touching_windows
from warnings import warn

export, __all__ = strax.exporter()


@export
@strax.takes_config(
    strax.Option('peaklet_gap_threshold', default=350,
                 help="No hits for this many ns triggers a new peak"),
    strax.Option('peak_left_extension', default=30,
                 help="Include this many ns left of hits in peaks"),
    strax.Option('peak_right_extension', default=200,
                 help="Include this many ns right of hits in peaks"),
    strax.Option(
        'peak_min_pmts', default=2,
        help="Minimum number of contributing PMTs needed to define a peak"),
    strax.Option(
        'peak_split_gof_threshold',
        # See https://xe1t-wiki.lngs.infn.it/doku.php?id=
        # xenon:xenonnt:analysis:strax_clustering_classification
            output_record_index += 1

    mypax.shutdown()

    # In strax data, records are always stored
    # sorted, baselined and integrated
    records = strax.sort_by_time(records)
    strax.baseline(records)
    strax.integrate(records)
    return records


@export
@strax.takes_config(
    strax.Option('pax_raw_dir', default='/data/xenon/raw', track=False,
                 help="Directory with raw pax datasets"),
    strax.Option('stop_after_zips', default=0, track=False,
                 help="Convert only this many zip files. 0 = all."))
class RecordsFromPax(strax.Plugin):
    provides = 'raw_records'
    data_kind = 'raw_records'
    depends_on = tuple()
    dtype = strax.record_dtype()
    parallel = False

    def iter(self, *args, **kwargs):
        if not os.path.exists(self.config['pax_raw_dir']):
            raise FileNotFoundError(self.config['pax_raw_dir'])
import strax
import numpy as np
import numba

import straxen
from straxen.get_corrections import is_cmt_option

export, __all__ = strax.exporter()

MV_PREAMBLE = 'Muon-Veto Plugin: Same as the corresponding nVETO-Plugin.\n'
NV_HIT_OPTIONS = (
    strax.Option(
        'save_outside_hits_nv',
        default=(3, 15), track=True, infer_type=False,
        help='Save (left, right) samples besides hits; cut the rest'),
    strax.Option(
        'hit_min_amplitude_nv', infer_type=False,
        default=('hit_thresholds_nv', 'ONLINE', True), track=True,
        help='Minimum hit amplitude in ADC counts above baseline. '
             'Specify as a tuple of length n_nveto_pmts, or a number, '
             'or a string like "pmt_commissioning_initial" which means calling '
             'hitfinder_thresholds.py, '
             'or a tuple like (correction=str, version=str, nT=boolean), '
             'which means we are using cmt.'),
)


@export
@strax.takes_config(
    *NV_HIT_OPTIONS,
    strax.Option(
ARTIFICIAL_DEADTIME_CHANNEL = 799


class ArtificialDeadtimeInserted(UserWarning):
    pass


@export
@strax.takes_config(
    # All these must have track=False, so the raw_records hash never changes!
    # DAQ settings -- should match settings given to redax
    strax.Option('record_length', default=110, track=False, type=int,
                 help="Number of samples per raw_record"),
    strax.Option(
        'max_digitizer_sampling_time',
        default=10, track=False, type=int,
        help="Highest interval time of the digitizer sampling time(s) used."),
    strax.Option('run_start_time', type=float, track=False, default=0,
                 help="Time of run start (s since unix epoch)"),
    strax.Option('daq_chunk_duration', track=False,
import tempfile

import numpy as np
import numba

import strax
import straxen
from straxen.common import pax_file, get_resource, first_sr1_run

export, __all__ = strax.exporter()

from .pulse_processing import HE_PREAMBLE


@export
@strax.takes_config(
    strax.Option('n_top_pmts', default=straxen.n_top_pmts,
                 help="Number of top PMTs"))
class PeakBasics(strax.Plugin):
    """
    Compute the basic peak-properties, thereby dropping structured arrays.
    NB: This plugin can therefore be loaded as a pandas DataFrame.
    """
    __version__ = "0.0.8"
    parallel = True
    depends_on = ('peaks',)
    provides = 'peak_basics'
    dtype = [
        (('Start time of the peak (ns since unix epoch)', 'time'), np.int64),
        (('End time of the peak (ns since unix epoch)', 'endtime'), np.int64),
        (('Weighted center time of the peak (ns since unix epoch)',
            offset = rec_i * samples_per_record
            r['data'][:n_store] = p['signal'][offset:offset + n_store]
            output_record_index += 1

        results.append(records)
        if len(results) >= events_per_chunk:
            yield finish_results()

    if len(results):
        y = finish_results()
        if len(y):
            yield y


@export
@strax.takes_config(
    strax.Option('fax_file', default=None, track=False,
                 help="Directory with fax instructions"),
    strax.Option('nevents', default=50, track=False,
                 help="Number of random events to generate if no "
                      "instructions are provided"),
    strax.Option('events_per_chunk', default=50, track=False,
                 help="Number of events to yield per chunk"),
    strax.Option('samples_per_record', default=strax.DEFAULT_RECORD_LENGTH,
                 track=False,
                 help="Number of samples per record"),
    strax.Option('general_config', default='https://github..../'),
)
class PeaksFromFax(strax.Plugin):
    provides = 'Peaks'
    data_kind = 'Peaks'
    compressor = 'zstd'
    depends_on = tuple()
    parallel = False
    rechunk_on_save = False
from immutabledict import immutabledict
import numba
import numpy as np

import strax
import straxen

export, __all__ = strax.exporter()
__all__ += ['NO_PULSE_COUNTS']

# These are also needed in peaklets, since hitfinding is repeated
HITFINDER_OPTIONS = tuple([
    strax.Option(
        'hit_min_amplitude', default='pmt_commissioning_initial',
        help='Minimum hit amplitude in ADC counts above baseline. '
             'See straxen.hit_min_amplitude for options.')])

HITFINDER_OPTIONS_he = tuple([
    strax.Option(
        'hit_min_amplitude_he', track=True,
        default="pmt_commissioning_initial_he", child_option=True,
        parent_option_name='hit_min_amplitude',
        help='Minimum hit amplitude in ADC counts above baseline for the '
             'high energy channels. '
             'See straxen.hit_min_amplitude for options.')])

HE_PREAMBLE = """High energy channels: attenuated signals of the top PMT-array\n"""
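# Note on the child-option pattern above: hit_min_amplitude_he is declared
# with child_option=True and parent_option_name='hit_min_amplitude', so the
# high-energy variant of a plugin can carry its own default while strax
# resolves the value through the parent's option name (see straxen's
# child-plugin handling for details).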
import numpy as np
import numba

import strax

export, __all__ = strax.exporter()

# V/adc * (sec/sample) * (1/resistance) * (1/electron charge)
adc_to_e = (2.25 / 2**14) * (1e-9) * (1 / 50) * (1 / 1.602e-19)
to_pe = np.ones(7) * adc_to_e / 6e6


@export
@strax.takes_config(
    strax.Option('input_dir', type=str, track=False,
                 help='The directory with the data'),
)
class RecordReader(strax.Plugin):
    """
    Reads records in from disk
    """
    provides = 'raw_records'
    depends_on = tuple()
    dtype = strax.record_dtype()
    rechunk_on_save = False

    def source_finished(self):
        return True

    def is_ready(self, chunk_i):
        try:
        _truth = self.truth_buffer[self.truth_buffer['fill']]
        # Return truth without 'fill' field
        truth = np.zeros(len(_truth),
                         dtype=instruction_dtype + truth_extra_dtype)
        for name in truth.dtype.names:
            truth[name] = _truth[name]
        return dict(raw_records=records, truth=truth)

    def source_finished(self):
        return self.rawdata.source_finished


@strax.takes_config(
    strax.Option('fax_file', default=None, track=True,
                 help="Directory with fax instructions"),
    strax.Option('experiment', default='XENON1T', track=True,
                 help="Experiment to simulate"),
    strax.Option('event_rate', default=5, track=False,
                 help="Average number of events per second"),
    strax.Option('chunk_size', default=5, track=False,
                 help="Duration of each chunk in seconds"),
    strax.Option('nchunk', default=4,
import numpy as np
import numba
from straxen.numbafied_scipy import numba_gammaln, numba_betainc
from scipy.special import loggamma
from scipy.stats import binomtest
import tarfile
import tempfile

import strax

export, __all__ = strax.exporter()


@export
@strax.takes_config(
    strax.Option(
        's1_optical_map', help='S1 (x, y, z) optical/pattern map.',
        infer_type=False,
        default='XENONnT_s1_xyz_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl'),
    strax.Option(
        's2_optical_map', help='S2 (x, y) optical/pattern map.',
        infer_type=False,
        default='XENONnT_s2_xy_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl'),
    strax.Option(
        's2_tf_model', help='S2 (x, y) optical data-driven model',
        infer_type=False,
        default='XENONnT_s2_optical_map_data_driven_ML_v0_2021_11_25.tar.gz'),
    strax.Option(
        'mean_pe_per_photon',
                r['data'][:n_store] = p.raw_data[offset:offset + n_store]
                output_record_index += 1

            results.append(records)
            if len(results) >= events_per_chunk:
                yield finish_results()

        mypax.shutdown()

        if len(results):
            yield finish_results()


@export
@strax.takes_config(
    strax.Option('pax_raw_dir', default='/data/xenon/raw', track=False,
                 help="Directory with raw pax datasets"),
    strax.Option('stop_after_zips', default=0, track=False,
                 help="Convert only this many zip files. 0 = all."),
    strax.Option('events_per_chunk', default=10, track=False,
                 help="Number of events to yield per chunk"))
class RecordsFromPax(strax.Plugin):
    provides = 'raw_records'
    data_kind = 'raw_records'
    depends_on = tuple()
    dtype = strax.record_dtype()
    parallel = False

    def iter(self, *args, **kwargs):
        if not os.path.exists(self.config['pax_raw_dir']):
            raise FileNotFoundError(self.config['pax_raw_dir'])
import numba
import numpy as np
from immutabledict import immutabledict

import strax
import straxen

export, __all__ = strax.exporter()

MV_PREAMBLE = 'Muon-Veto Plugin: Same as the corresponding nVETO-Plugin.\n'


@export
@strax.takes_config(
    strax.Option(
        'save_outside_hits_nv',
        default=(3, 15), track=True,
        help='Save (left, right) samples besides hits; cut the rest'),
    strax.Option(
        'hit_min_amplitude_nv',
        default=20, track=True,
        help='Minimum hit amplitude in ADC counts above baseline. '
             'Specify as a tuple of length 120, or a number.'),
    strax.Option(
        'min_split_nv',
        default=100, track=True,
        help='Minimum height difference (pe/sample) between local minimum '
             'and maximum for a pulse to be split.'),
    strax.Option(
        'min_split_ratio_nv',
        default=0, track=True,
        help='Min ratio between local maximum and minimum to split pulse '
             '(zero to switch this off).'),
            results.append(records)
            if len(results) >= events_per_chunk:
                yield finish_results()

        mypax.shutdown()

        if len(results):
            y = finish_results()
            if len(y):
                yield y


@export
@strax.takes_config(
    strax.Option('pax_raw_dir', default='/data/xenon/raw', track=False,
                 help="Directory with raw pax datasets"),
    strax.Option('stop_after_zips', default=0, track=False,
                 help="Convert only this many zip files. 0 = all."),
    strax.Option('events_per_chunk', default=50, track=False,
                 help="Number of events to yield per chunk"),
    strax.Option('samples_per_record', default=strax.DEFAULT_RECORD_LENGTH,
                 track=False,
                 help="Number of samples per record"))
class RecordsFromPax(strax.Plugin):
    provides = 'raw_records'
import strax
import numpy as np
import numba

export, __all__ = strax.exporter()

MV_PREAMBLE = 'Muon-Veto Plugin: Same as the corresponding nVETO-Plugin.\n'


@export
@strax.takes_config(
    strax.Option(
        'save_outside_hits_nv',
        default=(3, 15), track=True,
        help='Save (left, right) samples besides hits; cut the rest'),
    strax.Option(
        'baseline_samples_nv',
        default=10, track=True,
        help='Number of samples to use at the start of the pulse to determine '
             'the baseline'),
    strax.Option(
        'hit_min_amplitude_nv',
        default=20, track=True,
        help='Minimum hit amplitude in ADC counts above baseline. '
             'Specify as a tuple of length n_nveto_pmts, or a number.'),
)
class nVETOPulseProcessing(strax.Plugin):
    """
    nVETO equivalent of pulse processing. The following steps are applied:

        1. Flip, baseline and integrate waveforms.
        2. Find hits and apply ZLE