Example #1
class DummyRawRecords(strax.Plugin):
    """
    Provide dummy raw records for the major raw_record types
    """
    provides = ('raw_records', 'raw_records_he', 'raw_records_nv',
                'raw_records_aqmon')
    parallel = 'process'
    depends_on = tuple()
    data_kind = immutabledict(zip(provides, provides))
    rechunk_on_save = False
    dtype = {p: strax.raw_record_dtype() for p in provides}

    def source_finished(self):
        return True

    def is_ready(self, chunk_i):
        return chunk_i < N_CHUNKS

    def compute(self, chunk_i):
        t0 = chunk_i + self.config['secret_time_offset']
        if chunk_i < N_CHUNKS - 1:
            r = np.zeros(recs_per_chunk, self.dtype['raw_records'])
            r['time'] = t0
            r['length'] = r['dt'] = 1
            r['channel'] = np.arange(len(r))
        else:
            r = np.zeros(0, self.dtype['raw_records'])
        res = {
            p: self.chunk(start=t0, end=t0 + 1, data=r, data_type=p)
            for p in self.provides
        }
        return res
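All the snippets on this page revolve around strax.raw_record_dtype(). As a quick orientation, here is a minimal sketch (assuming strax and numpy are installed) that inspects the field names used throughout the examples, such as time, length, dt and channel:

import numpy as np
import strax

# Allocate one empty raw record and list its fields; the names assigned in
# the compute() method above (time, length, dt, channel) all come from here.
rr = np.zeros(1, dtype=strax.raw_record_dtype())
print(rr.dtype.names)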
Example #2
    def __init__(self, seed=0):

        self.logger = logging.getLogger(__name__)
        self.logger.info('Initializing generator')
        np.random.seed(seed)
        self.__data_path = pkg_resources.resource_filename('wfsimn', 'data/')

        # Strax record parameters
        self.dt = 2  # Time resolution in ns
        self.record_length = 110  # waveform length in one record
        self.peak_lbk = 20  # Average pulse bins before peak
        self.peak_lfw = 40  # Average pulse bins after peak
        self.bin_baseline = 40  # nbins of baseline including strax format
        self.nbins_templete_wf = self.peak_lbk + self.peak_lfw  # number of average pulse bins
        self.time_window = 100  # ns; event time window in strax format

        self.nv_raw_record_dtype = strax.raw_record_dtype(self.record_length)

        # Pulse parameters
        self.pulse_height = 57  # mean of 1pe pulse height in ADC
        self.pulse_spread = 26  # std. dev. of 1pe pulse height in ADC
        self.pulse_baseline_ADC = 15925  # Actual baseline in ADC
        self.pulse_baseline_spread = 3.5  # baseline spread in ADC

        self.event_time_interval = 1.e-6  # Each event occurs within this interval (seconds)

        self.preprocessor = wfsimn.preprocessor()
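As a standalone sanity check on these parameters (a sketch, not part of the original class): the 'data' field of the resulting dtype holds record_length samples, so one record covers record_length * dt nanoseconds.

import numpy as np
import strax

record_length, dt = 110, 2  # values from the snippet above
dtype = strax.raw_record_dtype(record_length)
# The waveform field has one column per sample in the record.
assert np.zeros(1, dtype=dtype)['data'].shape == (1, record_length)
print(f'one record spans {record_length * dt} ns')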
Example #3
 def infer_dtype(self):
     dtype = {
         data_type: strax.raw_record_dtype(
             samples_per_record=strax.DEFAULT_RECORD_LENGTH)
         for data_type in self.provides if data_type != 'truth'
     }
     dtype['truth'] = instruction_dtype + truth_extra_dtype
     return dtype
Example #4
 def infer_dtype(self):
     dtype = {
         data_type: (instruction_dtype + truth_extra_dtype
                     if 'truth' in data_type else
                     strax.raw_record_dtype(
                         samples_per_record=strax.DEFAULT_RECORD_LENGTH))
         for data_type in self.provides
     }
     return dtype
Example #5
 def __init__(self, config):
     self.config = config
     self.rawdata = wfsim.RawDataOptical(self.config)
     self.record_buffer = np.zeros(
         5000000, dtype=strax.raw_record_dtype())  # 2*250 ms buffer
     self.truth_buffer = np.zeros(10000,
                                  dtype=instruction_dtype +
                                  truth_extra_dtype + [('fill', bool)])
Example #6
class DummyRawRecords(strax.Plugin):
    """
    Provide dummy raw records for the major raw_record types
    """
    provides = ('raw_records',
                'raw_records_he',
                'raw_records_nv',
                'raw_records_aqmon',
                'raw_records_aux_mv',
                'raw_records_mv'
                )
    parallel = 'process'
    depends_on = tuple()
    data_kind = immutabledict(zip(provides, provides))
    rechunk_on_save = False
    dtype = {p: strax.raw_record_dtype() for p in provides}

    def setup(self):
        self.channel_map_keys = {'he': 'he',
                                 'nv': 'nveto',
                                 'aqmon': 'aqmon',
                                 'aux_mv': 'aux_mv',
                                 's_mv': 'mv',
                                 }  # 's_mv' since plain 'mv' would also match 'aux_mv' in endswith

    def source_finished(self):
        return True

    def is_ready(self, chunk_i):
        return chunk_i < self.config['n_chunks']

    def compute(self, chunk_i):
        t0 = chunk_i + self.config['secret_time_offset']
        if chunk_i < self.config['n_chunks'] - 1:
            # One filled chunk
            r = np.zeros(self.config['recs_per_chunk'], self.dtype['raw_records'])
            r['time'] = t0
            r['length'] = r['dt'] = 1
            r['channel'] = np.arange(len(r))
        else:
            # One empty chunk
            r = np.zeros(0, self.dtype['raw_records'])

        res = {}
        for p in self.provides:
            rr = np.copy(r)
            # Add detector specific channel offset:
            for key, channel_key in self.channel_map_keys.items():
                if channel_key not in self.config['channel_map']:
                    # Channel map for 1T is different.
                    continue
                if p.endswith(key):
                    s, e = self.config['channel_map'][channel_key]
                    rr['channel'] += s
            res[p] = self.chunk(start=t0, end=t0 + 1, data=rr, data_type=p)
        return res
Example #7
    def infer_dtype(self):
        self.record_length = strax.record_length_from_dtype(
            self.deps['raw_records_nv'].dtype_for('raw_records_nv'))

        nveto_records_dtype = strax.raw_record_dtype(self.record_length)
        nveto_diagnostic_lone_records_dtype = strax.record_dtype(self.record_length)
        nveto_lone_records_statistics_dtype = lone_record_statistics_dtype(self.config['n_nveto_pmts'])

        dtypes = [nveto_records_dtype,
                  nveto_diagnostic_lone_records_dtype,
                  nveto_lone_records_statistics_dtype]

        return {k: v for k, v in zip(self.provides, dtypes)}
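The round trip used here, building a dtype from a record length and later recovering that length with strax.record_length_from_dtype, can be checked in isolation (a minimal sketch, assuming only strax):

import strax

# record_length_from_dtype inverts raw_record_dtype, which is what the
# infer_dtype method above relies on when reading the record length off
# the upstream 'raw_records_nv' plugin.
dtype = strax.raw_record_dtype(samples_per_record=110)
assert strax.record_length_from_dtype(dtype) == 110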
Example #8
    def __init__(self, config, rawdata_generator=wfsim.RawData, **kwargs):
        log.debug(f'Starting {self.__class__.__name__}')
        self.config = config
        log.debug(f'Setting raw data with {rawdata_generator.__name__}')
        self.rawdata = rawdata_generator(self.config, **kwargs)
        self.record_buffer = np.zeros(
            5000000,
            dtype=strax.raw_record_dtype(
                samples_per_record=strax.DEFAULT_RECORD_LENGTH))
        self.truth_buffer = np.zeros(10000,
                                     dtype=instruction_dtype +
                                     truth_extra_dtype + [('fill', bool)])

        self.blevel = 0  # buffer_filled_level
Example #9
    def __init__(self, config):
        log.debug(f'Starting {self.__class__.__name__} with {config}')
        self.config = config
        log.debug('Setting raw data')
        self.rawdata = wfsim.RawData(self.config)
        log.debug('Raw data is set')
        self.record_buffer = np.zeros(
            5000000,
            dtype=strax.raw_record_dtype(
                samples_per_record=strax.DEFAULT_RECORD_LENGTH)
        )  # 2*250 ms buffer
        self.truth_buffer = np.zeros(10000,
                                     dtype=instruction_dtype +
                                     truth_extra_dtype + [('fill', bool)])

        self.blevel = 0  # buffer_filled_level
        log.debug(f'{self.__class__.__name__} initiated')
Example #10
    def infer_dtype(self):
        self.record_length = strax.record_length_from_dtype(
            self.deps['raw_records_nv'].dtype_for('raw_records_nv'))

        channel_range = self.config['channel_map']['nveto']
        n_channel = (channel_range[1] - channel_range[0]) + 1
        nveto_records_dtype = strax.raw_record_dtype(self.record_length)
        nveto_diagnostic_lone_records_dtype = strax.record_dtype(
            self.record_length)
        nveto_lone_records_statistics_dtype = lone_record_statistics_dtype(
            n_channel)

        dtypes = [
            nveto_records_dtype, nveto_diagnostic_lone_records_dtype,
            nveto_lone_records_statistics_dtype
        ]

        return {k: v for k, v in zip(self.provides, dtypes)}
Example #11
 def infer_dtype(self):
     return {
         d: strax.raw_record_dtype(
             samples_per_record=self.config["record_length"])
         for d in self.provides
     }
Example #12
class DummyRawRecords(strax.Plugin):
    """
    Provide dummy raw records for the major raw_record types
    """
    provides = straxen.daqreader.DAQReader.provides
    parallel = 'process'
    depends_on = tuple()
    data_kind = immutabledict(zip(provides, provides))
    rechunk_on_save = False
    dtype = {p: strax.raw_record_dtype() for p in provides}

    def setup(self):
        self.channel_map_keys = {
            'he': 'he',
            'nv': 'nveto',
            'aqmon': 'aqmon',
            'aux_mv': 'aux_mv',
            's_mv': 'mv',
        }  # 's_mv' since plain 'mv' would also match 'aux_mv' in endswith

    def source_finished(self):
        return True

    def is_ready(self, chunk_i):
        return chunk_i < self.config['n_chunks']

    def compute(self, chunk_i):
        t0 = chunk_i + self.config['secret_time_offset']
        if chunk_i < self.config['n_chunks'] - 1:
            # One filled chunk
            r = np.zeros(self.config['recs_per_chunk'],
                         self.dtype['raw_records'])
            r['time'] = t0
            r['length'] = r['dt'] = 1
            r['channel'] = np.arange(len(r))
        else:
            # One empty chunk
            r = np.zeros(0, self.dtype['raw_records'])

        res = {}
        for p in self.provides:
            rr = np.copy(r)
            # Add detector specific channel offset:
            for key, channel_key in self.channel_map_keys.items():
                if channel_key not in self.config['channel_map']:
                    # Channel map for 1T is different.
                    continue
                if p.endswith(key):
                    first_channel, last_channel = self.config['channel_map'][
                        channel_key]
                    rr['channel'] += first_channel
                    if key == 'aqmon':
                        # explicitly clip these channels as we have an additional check higher in the chain
                        first_channel = int(
                            min(straxen.plugins.acqmon_processing.AqmonChannels
                                ))
                        last_channel = int(
                            max(straxen.plugins.acqmon_processing.AqmonChannels
                                ))

                    rr = rr[(rr['channel'] >= first_channel)
                            & (rr['channel'] < last_channel)]
            res[p] = self.chunk(start=t0, end=t0 + 1, data=rr, data_type=p)
        return res
Example #13
import logging

import uproot
import nestpy

import numpy as np
import pandas as pd

import strax
from straxen.common import get_resource
from straxen import get_to_pe
import wfsim
from immutabledict import immutabledict

export, __all__ = strax.exporter()
__all__ += ['raw_records_nv_dtype']

raw_records_nv_dtype = strax.raw_record_dtype(110)

log = logging.getLogger('SimulationNvCore')


@strax.takes_config(
    strax.Option('seed',
                 default=False,
                 track=True,
                 help="Option for setting the seed of the random number"), )
class WfsimN(strax.Plugin):

    provides = ('raw_records_nv',)  # trailing comma: a one-element tuple, not a string
    #data_kind = immutabledict(zip(provides, provides))
    depends_on = tuple()
    rechunk_on_save = False
Example #14
    def compute(self):
        log.debug('Full chain plugin calling compute')
        if 'tpc' in self.config['targets']:
            try:
                result = next(self.sim_iter)
            except StopIteration:
                if self.sim.source_finished():
                    log.debug('TPC instructions are already depleted')
                    result = {data_type: np.zeros(0, self.dtype_for(data_type))
                              for data_type in self.provides
                              if 'nv' not in data_type}

                    num_of_results = sum([len(v) for _, v in result.items()])
                    if num_of_results != 0:
                        self.sim.chunk_time = self.sim_nv.chunk_time
                        self.sim.chunk_time_pre = self.sim_nv.chunk_time_pre
                else:
                    raise RuntimeError("Bug in getting source finished")

        if 'nveto' in self.config['targets']:
            try:
                result_nv = next(self.sim_nv_iter)
                result_nv['raw_records']['channel'] += self.config[
                    'channel_map']['nveto'][0]
            except StopIteration:
                if self.sim_nv.source_finished():
                    log.debug('nVeto instructions are already depleted')
                    # Note: str.strip('_nv') removes trailing characters from
                    # the set {_, n, v}, not a literal suffix; here it maps
                    # e.g. 'raw_records_nv' to 'raw_records'.
                    result_nv = {data_type.strip('_nv'):
                                 np.zeros(0, self.dtype_for(data_type))
                                 for data_type in self.provides
                                 if 'nv' in data_type}
                    self.sim_nv.chunk_time = self.sim.chunk_time
                    self.sim_nv.chunk_time_pre = self.sim.chunk_time_pre
                else:
                    raise RuntimeError("Bug in getting source finished")

        exist_tpc_result, exist_nveto_result = False, False
        for data_type in self.provides:
            if 'nv' in data_type:
                if len(result_nv[data_type.strip('_nv')]) > 0:
                    exist_nveto_result = True
            else:
                if len(result[data_type]) > 0:
                    exist_tpc_result = True
        chunk = {}
        for data_type in self.provides:
            if 'nv' in data_type:
                if exist_nveto_result:
                    chunk[data_type] = self.chunk(
                        start=self.sim_nv.chunk_time_pre,
                        end=self.sim_nv.chunk_time,
                        data=result_nv[data_type.strip('_nv')],
                        data_type=data_type)
                # If nv is not one of the targets just return an empty chunk
                # If there is TPC event, set TPC time for the start and end
                else:
                    dummy_dtype = (wfsim.truth_extra_dtype
                                   if 'truth' in data_type
                                   else strax.raw_record_dtype())
                    if exist_tpc_result:
                        chunk[data_type] = self.chunk(
                            start=self.sim.chunk_time_pre,
                            end=self.sim.chunk_time,
                            data=np.array([], dtype=dummy_dtype),
                            data_type=data_type)
                    else:
                        chunk[data_type] = self.chunk(start=0,
                                                      end=0,
                                                      data=np.array(
                                                          [],
                                                          dtype=dummy_dtype),
                                                      data_type=data_type)
            else:
                if exist_tpc_result:
                    chunk[data_type] = self.chunk(
                        start=self.sim.chunk_time_pre,
                        end=self.sim.chunk_time,
                        data=result[data_type],
                        data_type=data_type)
                else:
                    dummy_dtype = (wfsim.truth_extra_dtype
                                   if 'truth' in data_type
                                   else strax.raw_record_dtype())
                    if exist_nveto_result:
                        chunk[data_type] = self.chunk(
                            start=self.sim_nv.chunk_time_pre,
                            end=self.sim_nv.chunk_time,
                            data=np.array([], dtype=dummy_dtype),
                            data_type=data_type)
                    else:
                        chunk[data_type] = self.chunk(start=0,
                                                      end=0,
                                                      data=np.array(
                                                          [],
                                                          dtype=dummy_dtype),
                                                      data_type=data_type)

        self._sort_check(
            [chunk[data_type].data for data_type in self.provides])

        return chunk
Example #15
    def infer_dtype(self):
        return strax.raw_record_dtype()
Example #16
 def infer_dtype(self):
     return strax.raw_record_dtype(self.config['samples_per_record'])
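Several examples call raw_record_dtype() with no argument; the examples that do pass samples_per_record suggest the default is strax.DEFAULT_RECORD_LENGTH, with only the size of the 'data' field changing. A small sketch of that assumption:

import numpy as np
import strax

# Assumption drawn from the examples above: the bare call falls back to
# strax.DEFAULT_RECORD_LENGTH samples for the 'data' field.
default = np.dtype(strax.raw_record_dtype())
custom = np.dtype(strax.raw_record_dtype(samples_per_record=42))
assert default['data'].shape == (strax.DEFAULT_RECORD_LENGTH,)
assert custom['data'].shape == (42,)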
Example #17
    def __call__(self, instructions, time_zero=None, **kwargs):
        """
        :param instructions: Structured array with instruction dtype in strax_interface module
        :param time_zero: Starting time of the first chunk
        """
        samples_per_record = strax.DEFAULT_RECORD_LENGTH
        if len(instructions) == 0:  # Empty
            yield from np.array([],
                                dtype=strax.raw_record_dtype(
                                    samples_per_record=samples_per_record))
            self.rawdata.source_finished = True
            return
        dt = self.config['sample_duration']
        buffer_length = len(self.record_buffer)
        rext = int(self.config['right_raw_extension'])
        cksz = int(self.config['chunk_size'] * 1e9)

        # Save the constants as privates
        self.blevel = 0  # buffer_filled_level
        self.chunk_time_pre = time_zero - rext if time_zero else np.min(
            instructions['time']) - rext
        self.chunk_time = self.chunk_time_pre + cksz  # Starting chunk
        self.current_digitized_right = self.last_digitized_right = 0
        for channel, left, right, data in self.rawdata(
                instructions=instructions,
                truth_buffer=self.truth_buffer,
                **kwargs):
            pulse_length = right - left + 1
            records_needed = int(np.ceil(pulse_length / samples_per_record))

            if self.rawdata.right != self.current_digitized_right:
                self.last_digitized_right = self.current_digitized_right
                self.current_digitized_right = self.rawdata.right

            if self.rawdata.left * dt > self.chunk_time + rext:
                next_left_time = self.rawdata.left * dt
                log.debug(
                    f'Pause sim loop at {self.chunk_time}, next pulse start at {next_left_time}'
                )
                if (self.last_digitized_right + 1) * dt > self.chunk_time:
                    extend = (self.last_digitized_right +
                              1) * dt - self.chunk_time
                    self.chunk_time += extend
                    log.debug(
                        f'Chunk happened during event, extending {extend} ns')
                yield from self.final_results()
                self.chunk_time_pre = self.chunk_time
                self.chunk_time += cksz

            if self.blevel + records_needed > buffer_length:
                log.warning(
                    'Chunk size too large, insufficient record buffer \n'
                    'No longer in sync if simulating nVeto with TPC \n'
                    'Consider reducing the chunk size')
                next_left_time = self.rawdata.left * dt
                self.chunk_time = (self.last_digitized_right + 1) * dt
                log.debug(
                    f'Pause sim loop at {self.chunk_time}, next pulse start at {next_left_time}'
                )
                yield from self.final_results()
                self.chunk_time_pre = self.chunk_time
                self.chunk_time += cksz

            if self.blevel + records_needed > buffer_length:
                log.warning(
                    'Pulse length too large, insufficient record buffer, skipping pulse'
                )
                continue

            # WARNING baseline and area fields are zeros before finish_results
            s = slice(self.blevel, self.blevel + records_needed)
            self.record_buffer[s]['channel'] = channel
            self.record_buffer[s]['dt'] = dt
            self.record_buffer[s]['time'] = dt * (
                left + samples_per_record * np.arange(records_needed))
            self.record_buffer[s]['length'] = [
                min(pulse_length, samples_per_record * (i + 1)) -
                samples_per_record * i for i in range(records_needed)
            ]
            self.record_buffer[s]['pulse_length'] = pulse_length
            self.record_buffer[s]['record_i'] = np.arange(records_needed)
            self.record_buffer[s]['data'] = np.pad(
                data, (0, records_needed * samples_per_record - pulse_length),
                'constant').reshape((-1, samples_per_record))
            self.blevel += records_needed

        self.last_digitized_right = self.current_digitized_right
        self.chunk_time = max((self.last_digitized_right + 1) * dt,
                              self.chunk_time_pre + dt)
        yield from self.final_results()
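The record bookkeeping in this loop is plain arithmetic and can be verified standalone (a sketch reusing the same expressions): a pulse of pulse_length samples needs ceil(pulse_length / samples_per_record) records, all full except possibly the last.

import numpy as np

# Same expressions as in the loop above, on concrete numbers: a 250-sample
# pulse at 110 samples per record fills 3 records of lengths 110, 110, 30.
pulse_length, samples_per_record = 250, 110
records_needed = int(np.ceil(pulse_length / samples_per_record))
lengths = [min(pulse_length, samples_per_record * (i + 1))
           - samples_per_record * i
           for i in range(records_needed)]
assert records_needed == 3
assert lengths == [110, 110, 30]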
Example #18
def pax_to_records(input_filename,
                   samples_per_record=strax.DEFAULT_RECORD_LENGTH,
                   events_per_chunk=10):
    """Return pulse records array from pax zip input_filename

    This only works if you have pax installed in your strax environment,
    which is somewhat tricky.
    """

    # Monkeypatch matplotlib so pax is importable
    # See https://github.com/XENON1T/pax/pull/734
    import matplotlib
    matplotlib._cntr = None

    from pax import core  # Pax is not a dependency

    mypax = core.Processor(
        'XENON1T',
        config_dict=dict(
            pax=dict(look_for_config_in_runs_db=False,
                     plugin_group_names=['input'],
                     encoder_plugin=None,
                     input_name=input_filename),
            # Fast startup: skip loading big maps
            WaveformSimulator=dict(s1_light_yield_map='placeholder_map.json',
                                   s2_light_yield_map='placeholder_map.json',
                                   s1_patterns_file=None,
                                   s2_patterns_file=None)))

    print(f"Starting conversion, {events_per_chunk} evt/chunk")

    results = []

    def finish_results():
        nonlocal results
        records = np.concatenate(results)
        # In strax data, records are always stored
        # sorted, baselined and integrated
        records = strax.sort_by_time(records)
        print("Returning %d records" % len(records))
        results = []
        return records

    for event in mypax.get_events():
        event = mypax.process_event(event)

        if not len(event.pulses):
            # Triggerless pax data contains many empty events
            # at the end. With the fixed events per chunk setting
            # this can lead to empty files, which confuses strax.
            continue

        pulse_lengths = np.array([p.length for p in event.pulses])

        n_records_tot = records_needed(pulse_lengths, samples_per_record).sum()
        records = np.zeros(n_records_tot,
                           dtype=strax.raw_record_dtype(samples_per_record))
        output_record_index = 0  # Record offset in data

        for p in event.pulses:
            n_records = records_needed(p.length, samples_per_record)

            for rec_i in range(n_records):
                r = records[output_record_index]
                r['time'] = (event.start_time + p.left * 10 +
                             rec_i * samples_per_record * 10)
                r['channel'] = p.channel
                r['pulse_length'] = p.length
                r['record_i'] = rec_i
                r['dt'] = 10

                # How much are we storing in this record?
                if rec_i != n_records - 1:
                    # There are more records coming, so we store a full record
                    n_store = samples_per_record
                    assert p.length > samples_per_record * (rec_i + 1)
                else:
                    # Just enough to store the rest of the data
                    # Note it's not p.length % samples_per_record!!!
                    # (that would be zero if we have to store a full record)
                    n_store = p.length - samples_per_record * rec_i

                assert 0 <= n_store <= samples_per_record
                r['length'] = n_store

                offset = rec_i * samples_per_record
                r['data'][:n_store] = p.raw_data[offset:offset + n_store]
                output_record_index += 1

        results.append(records)
        if len(results) >= events_per_chunk:
            yield finish_results()

    mypax.shutdown()

    if len(results):
        y = finish_results()
        if len(y):
            yield y
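Both this converter and the simulator in Example #17 split a waveform over records by zero-padding it to a multiple of samples_per_record and reshaping. The trick in isolation (a sketch):

import numpy as np

# Pad a 250-sample waveform up to 3 * 110 samples, then view it as three
# records of 110 samples each, as record_buffer['data'] is filled above.
samples_per_record = 110
data = np.arange(250, dtype=np.int16)
records_needed = 3
padded = np.pad(data, (0, records_needed * samples_per_record - len(data)),
                'constant')
assert padded.reshape((-1, samples_per_record)).shape == (3, 110)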