Example 1
"""Convert pax .zip files to flat records format
"""
import numpy as np
import os
import glob

import strax
export, __all__ = strax.exporter()
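# strax.exporter() returns the @export decorator together with this module's
# __all__ list; each name decorated with @export below is appended to __all__.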


def records_needed(pulse_length, samples_per_record):
    """Return records needed to store pulse_length samples"""
    return np.ceil(pulse_length / samples_per_record).astype(np.int64)
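# Worked example (illustrative values, not from the source): records_needed is
# a ceiling division, so a 250-sample pulse stored in 110-sample records needs
# ceil(250 / 110) = 3 records:
assert records_needed(250, 110) == 3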


@export
def pax_to_records(input_filename,
                   samples_per_record=strax.DEFAULT_RECORD_LENGTH,
                   events_per_chunk=10):
    """Return pulse records array from pax zip input_filename

    This only works if you have pax installed in your strax environment,
    which is somewhat tricky.
    """

    # Monkeypatch matplotlib so pax is importable
    # See https://github.com/XENON1T/pax/pull/734
    import matplotlib
    matplotlib._cntr = None

    from pax import core  # Pax is not a dependency
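A hedged usage sketch for the converter above (the filename is hypothetical and a working pax installation is assumed):

    records = pax_to_records('xe_run_000000.zip', events_per_chunk=10)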
Example 2
import numba
import numpy as np
import pandas as pd
from copy import deepcopy
from scipy.interpolate import interp1d

import strax
export, __all__ = strax.exporter(export_self=False)
PULSE_MAX_DURATION = int(1e3)
N_SPLIT_LOOP = 5


@export
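# The explicit signature below (int32 return; int64 waveform, scalars, and N*2
# buffer) makes numba compile the function eagerly at import time rather than
# lazily on the first call.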
@numba.jit(numba.int32(numba.int64[:], numba.int64, numba.int64,
                       numba.int64[:, :]),
           nopython=True)
def find_intervals_below_threshold(w, threshold, holdoff, result_buffer):
    """Fills result_buffer with l, r bounds of intervals in w < threshold.
    :param w: Waveform to do hitfinding in
    :param threshold: Threshold for including an interval
    :param holdoff: Holdoff number of samples after the pulse return back down to threshold
    :param result_buffer: numpy N*2 array of ints, will be filled by function.
                          if more than N intervals are found, none past the first N will be processed.
    :returns : number of intervals processed
    Boundary indices are inclusive, i.e. the right boundary is the last index which was < threshold
    """
    result_buffer_size = len(result_buffer)
    last_index_in_w = len(w) - 1

    in_interval = False
    current_interval = 0
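A hedged usage sketch (the waveform and buffer sizes are illustrative): the caller pre-allocates the N*2 result buffer and keeps only the rows the function filled in.

    w = np.zeros(100, dtype=np.int64)
    w[20:30] = -5                                # a dip below threshold
    buffer = np.zeros((10, 2), dtype=np.int64)
    n = find_intervals_below_threshold(w, -2, 0, buffer)
    intervals = buffer[:n]                       # inclusive [l, r] bounds where w < -2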
Example 3
from straxen.rundb import get_mongo_url, default_mongo_dbname
from strax import MongoFrontend, exporter
from socket import getfqdn

export, __all__ = exporter()

default_online_collection = 'online_monitor'


@export
class OnlineMonitor(MongoFrontend):
    """
    Online monitor Frontend for Saving data temporarily to the
    database
    """
    def __init__(self,
                 uri=None,
                 take_only=None,
                 database=default_mongo_dbname,
                 col_name=default_online_collection,
                 readonly=True,
                 *args,
                 **kwargs):
        if take_only is None:
            raise ValueError('Specify which data_types to accept! Otherwise '
                             'the database will be overloaded')
        if uri is None:
            host = getfqdn()
            uri = get_mongo_url(host)

        # Forward the collected options to the MongoFrontend base class
        # (kwargs such as take_only/readonly are handled by strax.StorageFrontend)
        super().__init__(uri=uri,
                         database=database,
                         col_name=col_name,
                         take_only=take_only,
                         readonly=readonly,
                         *args,
                         **kwargs)
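A minimal usage sketch (assumes an existing strax context st; the data_type name is illustrative): the frontend is appended to the context's storage, and take_only ensures only the listed data_types are written to MongoDB.

    st.storage.append(OnlineMonitor(take_only=('online_peak_monitor',)))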
Example 4
import numba
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d

import strax

export, __all__ = strax.exporter(export_self=True)


def init_spe_scaling_factor_distributions(file):
    # Extract the spe pdf from a csv file into a pandas dataframe
    spe_shapes = pd.read_csv(file)

    # Create a converter array from uniform random numbers to SPE gains (one interpolator per channel)
    # Scale the distributions so that they have an SPE mean of 1 and then calculate the cdf
    uniform_to_pe_arr = []
    # Skip the first column, which is the 'charge' header
    for ch in spe_shapes.columns[1:]:
        if spe_shapes[ch].sum() > 0:
            mean_spe = (spe_shapes['charge'] *
                        spe_shapes[ch]).sum() / spe_shapes[ch].sum()
            scaled_bins = spe_shapes['charge'] / mean_spe
            cdf = np.cumsum(spe_shapes[ch]) / np.sum(spe_shapes[ch])
        else:
            # if sum is 0, just make some dummy axes to pass to interpolator
            cdf = np.linspace(0, 1, 10)
            scaled_bins = np.zeros_like(cdf)

        uniform_to_pe_arr.append(interp1d(cdf, scaled_bins))
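A hedged usage sketch (assumes the function goes on to return uniform_to_pe_arr; the CSV name is made up): each interpolator is a channel's inverse CDF, so evaluating it at a uniform random number performs inverse transform sampling of SPE gains scaled to mean 1.

    uniform_to_pe_arr = init_spe_scaling_factor_distributions('spe_distributions.csv')
    median_gain = uniform_to_pe_arr[0](0.5)  # median SPE gain for channel 0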